language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/stacktraces/test_in_app_normalization.py | {
"start": 2028,
"end": 6033
} | class ____(TestCase):
def test_changes_in_app_None_into_in_app_False(self) -> None:
event_data = make_event(
[
make_stacktrace(
frame_0_in_app=True,
frame_1_in_app=None,
)
]
)
normalize_stacktraces_for_grouping(event_data)
frames = event_data["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["in_app"] is True
assert frames[1]["in_app"] is False
def test_changes_in_app_not_set_into_in_app_False(self) -> None:
event_data = make_event(
[
make_stacktrace(
frame_0_in_app=True,
# `frame_1_in_app` not set
)
]
)
normalize_stacktraces_for_grouping(event_data)
frames = event_data["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["in_app"] is True
assert frames[1]["in_app"] is False
def test_skips_None_frames(self) -> None:
# No arguments passed to `make_stacktrace` means neither example frame will have an `in_app` value
stacktrace = make_stacktrace()
stacktrace["frames"].insert(0, None)
event_data = make_event([stacktrace])
normalize_stacktraces_for_grouping(event_data)
frames = event_data["exception"]["values"][0]["stacktrace"]["frames"]
# The values here weren't set before we called `normalize_stacktraces_for_grouping`,
# so the fact that they now are shows that it didn't bail when it hit the `None` frame
assert frames[1]["in_app"] is False
assert frames[2]["in_app"] is False
def test_detects_frame_mix_correctly_with_single_stacktrace(self) -> None:
# Each case is `(frame1_in_app, frame2_in_app, expected_result)`
cases = [
(True, True, "in-app-only"),
(True, False, "mixed"),
(False, False, "system-only"),
]
for frame_0_in_app, frame_1_in_app, expected_frame_mix in cases:
event_data = make_event([make_stacktrace(frame_0_in_app, frame_1_in_app)])
normalize_stacktraces_for_grouping(event_data)
computed_frame_mix = event_data["metadata"]["in_app_frame_mix"]
assert (
computed_frame_mix == expected_frame_mix
), f"Expected {expected_frame_mix}, got {computed_frame_mix} with `in_app` values {frame_0_in_app}, {frame_1_in_app}"
def test_detects_frame_mix_correctly_with_multiple_stacktraces(self) -> None:
# Each case is `(stacktrace1_in_app_values, stacktrace2_in_app_values, expected_result)`
cases = [
# Two in-app-only stacktrces
((True, True), (True, True), "in-app-only"),
# One in-app-only stacktrace and one system-only stacktrace
((True, True), (False, False), "mixed"),
# One mixed stacktrace and one in-app-only stacktrace
((True, False), (True, True), "mixed"),
# One mixed stacktrace and one system-only stacktrace
((True, False), (False, False), "mixed"),
# Two mixed stacktraces
((True, False), (True, False), "mixed"),
# Two system-only stacktraces
((False, False), (False, False), "system-only"),
]
for stacktrace_0_mix, stacktrace_1_mix, expected_frame_mix in cases:
event_data = make_event(
[
make_stacktrace(*stacktrace_0_mix),
make_stacktrace(*stacktrace_1_mix),
]
)
normalize_stacktraces_for_grouping(event_data)
frame_mix = event_data["metadata"]["in_app_frame_mix"]
assert (
frame_mix == expected_frame_mix
), f"Expected {expected_frame_mix}, got {frame_mix} with stacktrace `in-app` values {stacktrace_0_mix}, {stacktrace_1_mix}"
| NormalizeInApptest |
python | django-import-export__django-import-export | tests/core/tests/test_base_formats.py | {
"start": 404,
"end": 2017
} | class ____(TestCase):
def setUp(self):
self.format = base_formats.Format()
@mock.patch(
"import_export.formats.base_formats.HTML.get_format", side_effect=ImportError
)
def test_format_non_available1(self, mocked):
self.assertFalse(base_formats.HTML.is_available())
@mock.patch(
"import_export.formats.base_formats.HTML.get_format",
side_effect=UnsupportedFormat,
)
def test_format_non_available2(self, mocked):
self.assertFalse(base_formats.HTML.is_available())
def test_format_available(self):
self.assertTrue(base_formats.CSV.is_available())
def test_get_title(self):
self.assertEqual(
"<class 'import_export.formats.base_formats.Format'>",
str(self.format.get_title()),
)
def test_create_dataset_NotImplementedError(self):
with self.assertRaises(NotImplementedError):
self.format.create_dataset(None)
def test_export_data_NotImplementedError(self):
with self.assertRaises(NotImplementedError):
self.format.export_data(None)
def test_get_extension(self):
self.assertEqual("", self.format.get_extension())
def test_get_content_type(self):
self.assertEqual("application/octet-stream", self.format.get_content_type())
def test_is_available_default(self):
self.assertTrue(self.format.is_available())
def test_can_import_default(self):
self.assertFalse(self.format.can_import())
def test_can_export_default(self):
self.assertFalse(self.format.can_export())
| FormatTest |
python | kennethreitz__tablib | src/tablib/formats/_yaml.py | {
"start": 61,
"end": 1455
} | class ____:
title = 'yaml'
extensions = ('yaml', 'yml')
@classmethod
def export_set(cls, dataset):
"""Returns YAML representation of Dataset."""
return yaml.safe_dump(dataset._package(ordered=False), default_flow_style=None)
@classmethod
def export_book(cls, databook):
"""Returns YAML representation of Databook."""
return yaml.safe_dump(databook._package(ordered=False), default_flow_style=None)
@classmethod
def import_set(cls, dset, in_stream):
"""Returns dataset from YAML stream."""
dset.wipe()
dset.dict = yaml.safe_load(in_stream)
@classmethod
def import_book(cls, dbook, in_stream):
"""Returns databook from YAML stream."""
dbook.wipe()
for sheet in yaml.safe_load(in_stream):
data = tablib.Dataset()
data.title = sheet['title']
data.dict = sheet['data']
dbook.add_sheet(data)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is valid YAML."""
try:
_yaml = yaml.safe_load(stream)
if isinstance(_yaml, (list, tuple, dict)):
return True
else:
return False
except (yaml.parser.ParserError, yaml.reader.ReaderError,
yaml.scanner.ScannerError):
return False
| YAMLFormat |
python | pandas-dev__pandas | pandas/tests/generic/test_to_xarray.py | {
"start": 447,
"end": 3127
} | class ____:
@pytest.fixture
def df(self):
return DataFrame(
{
"a": list("abcd"),
"b": list(range(1, 5)),
"c": np.arange(3, 7).astype("u1"),
"d": np.arange(4.0, 8.0, dtype="float64"),
"e": [True, False, True, False],
"f": Categorical(list("abcd")),
"g": date_range("20130101", periods=4),
"h": date_range("20130101", periods=4, tz="US/Eastern"),
}
)
def test_to_xarray_index_types(self, index_flat, df, request):
index = index_flat
# MultiIndex is tested in test_to_xarray_with_multiindex
if len(index) == 0:
pytest.skip("Test doesn't make sense for empty index")
if Version(xarray.__version__) < Version("2025.9.0"):
pytest.skip("Xarray bug https://github.com/pydata/xarray/issues/9661")
df.index = index[:4]
df.index.name = "foo"
df.columns.name = "bar"
result = df.to_xarray()
assert result.sizes["foo"] == 4
assert len(result.coords) == 1
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, xarray.Dataset)
# idempotency
# datetimes w/tz are preserved
# column names are lost
expected = df.copy()
expected.columns.name = None
tm.assert_frame_equal(result.to_dataframe(), expected)
def test_to_xarray_empty(self, df):
df.index.name = "foo"
result = df[0:0].to_xarray()
assert result.sizes["foo"] == 0
assert isinstance(result, xarray.Dataset)
def test_to_xarray_with_multiindex(self, df, using_infer_string):
# MultiIndex
df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
result = df.to_xarray()
assert result.sizes["one"] == 1
assert result.sizes["two"] == 4
assert len(result.coords) == 2
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, xarray.Dataset)
result = result.to_dataframe()
expected = df.copy()
expected["f"] = expected["f"].astype(
object if not using_infer_string else "str"
)
if Version(xarray.__version__) < Version("2025.1.0"):
expected["g"] = expected["g"].astype("M8[ns]")
expected["h"] = expected["h"].astype("M8[ns, US/Eastern]")
expected.columns.name = None
tm.assert_frame_equal(result, expected)
| TestDataFrameToXArray |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py38.py | {
"start": 2019,
"end": 2577
} | class ____(A, total=False):
# a is required, b is optional
b: bool
@given(from_type(B))
def test_typeddict_with_optional(value):
assert type(value) == dict
assert set(value).issubset({"a", "b"})
assert isinstance(value["a"], int)
if "b" in value:
assert isinstance(value["b"], bool)
def test_simple_optional_key_is_optional():
# Optional keys are not currently supported, as PEP-589 leaves no traces
# at runtime. See https://github.com/python/cpython/pull/17214
find_any(from_type(B), lambda d: "b" not in d)
| B |
python | kamyu104__LeetCode-Solutions | Python/happy-students.py | {
"start": 101,
"end": 606
} | class ____(object):
def countWays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cnt = [0]*(len(nums)+1)
for x in nums:
cnt[x] += 1
result = prefix = 0
for i in xrange(len(nums)+1):
if prefix == i and cnt[i] == 0:
result += 1
prefix += cnt[i]
return result
# Time: O(nlogn)
# Space: O(1)
# codeforce, https://codeforces.com/contest/1782/problem/B
# sort, greedy
| Solution |
python | huggingface__transformers | src/transformers/models/helium/modeling_helium.py | {
"start": 18486,
"end": 21439
} | class ____(HeliumPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = HeliumModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, HeliumForCausalLM
>>> model = HeliumForCausalLM.from_pretrained("google/helium-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/helium-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| HeliumForCausalLM |
python | PyCQA__pylint | pylint/extensions/overlapping_exceptions.py | {
"start": 579,
"end": 3270
} | class ____(checkers.BaseChecker):
"""Checks for two or more exceptions in the same exception handler
clause that are identical or parts of the same inheritance hierarchy.
(i.e. overlapping).
"""
name = "overlap-except"
msgs = {
"W0714": (
"Overlapping exceptions (%s)",
"overlapping-except",
"Used when exceptions in handler overlap or are identical",
)
}
options = ()
@utils.only_required_for_messages("overlapping-except")
def visit_try(self, node: nodes.Try) -> None:
"""Check for empty except."""
for handler in node.handlers:
if handler.type is None:
continue
if isinstance(handler.type, nodes.BoolOp):
continue
try:
excs = list(_annotated_unpack_infer(handler.type))
except astroid.InferenceError:
continue
handled_in_clause: list[tuple[Any, Any]] = []
for part, exc in excs:
if isinstance(exc, util.UninferableBase):
continue
if isinstance(exc, astroid.Instance) and utils.inherit_from_std_ex(exc):
exc = exc._proxied
if not isinstance(exc, nodes.ClassDef):
continue
exc_ancestors = [
a for a in exc.ancestors() if isinstance(a, nodes.ClassDef)
]
for prev_part, prev_exc in handled_in_clause:
prev_exc_ancestors = [
a for a in prev_exc.ancestors() if isinstance(a, nodes.ClassDef)
]
if exc == prev_exc:
self.add_message(
"overlapping-except",
node=handler.type,
args=f"{prev_part.as_string()} and {part.as_string()} are the same",
)
elif prev_exc in exc_ancestors or exc in prev_exc_ancestors:
ancestor = part if exc in prev_exc_ancestors else prev_part
descendant = part if prev_exc in exc_ancestors else prev_part
self.add_message(
"overlapping-except",
node=handler.type,
args=f"{ancestor.as_string()} is an ancestor class of {descendant.as_string()}",
)
handled_in_clause += [(part, exc)]
def register(linter: PyLinter) -> None:
linter.register_checker(OverlappingExceptionsChecker(linter))
| OverlappingExceptionsChecker |
python | sqlalchemy__sqlalchemy | examples/association/proxied_association.py | {
"start": 1688,
"end": 3860
} | class ____(Base):
__tablename__ = "orderitem"
order_id: Mapped[int] = mapped_column(
ForeignKey("order.order_id"), primary_key=True
)
item_id: Mapped[int] = mapped_column(
ForeignKey("item.item_id"), primary_key=True
)
price: Mapped[float]
item: Mapped[Item] = relationship(lazy="joined")
def __init__(self, item: Item, price: float | None = None):
self.item = item
self.price = price or item.price
if __name__ == "__main__":
engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
# create catalog
tshirt, mug, hat, crowbar = (
Item("SA T-Shirt", 10.99),
Item("SA Mug", 6.50),
Item("SA Hat", 8.99),
Item("MySQL Crowbar", 16.99),
)
session.add_all([tshirt, mug, hat, crowbar])
session.commit()
# create an order
order = Order("john smith")
# add items via the association proxy.
# the OrderItem is created automatically.
order.items.append(mug)
order.items.append(hat)
# add an OrderItem explicitly.
order.order_items.append(OrderItem(crowbar, 10.99))
session.add(order)
session.commit()
# query the order, print items
order = session.scalars(
select(Order).filter_by(customer_name="john smith")
).one()
# print items based on the OrderItem collection directly
print(
[
(assoc.item.description, assoc.price, assoc.item.price)
for assoc in order.order_items
]
)
# print items based on the "proxied" items collection
print([(item.description, item.price) for item in order.items])
# print customers who bought 'MySQL Crowbar' on sale
orders_stmt = (
select(Order)
.join(OrderItem)
.join(Item)
.filter(Item.description == "MySQL Crowbar")
.filter(Item.price > OrderItem.price)
)
print([o.customer_name for o in session.scalars(orders_stmt)])
| OrderItem |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 138643,
"end": 139459
} | class ____(TestCase):
def test_simple(self) -> None:
aggregate = "count_unique(user)"
translated = translate_aggregate_field(aggregate)
assert translated == "count_unique(tags[sentry:user])"
# Make sure it doesn't double encode:
translated_2 = translate_aggregate_field(translated)
assert translated_2 == "count_unique(tags[sentry:user])"
def test_reverse(self) -> None:
aggregate = "count_unique(tags[sentry:user])"
translated = translate_aggregate_field(aggregate, reverse=True)
assert translated == "count_unique(user)"
# Make sure it doesn't do anything wonky running twice:
translated_2 = translate_aggregate_field(translated, reverse=True)
assert translated_2 == "count_unique(user)"
| MetricTranslationTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault3.py | {
"start": 752,
"end": 929
} | class ____(Generic[T0]):
def method1(self, a: T0, b: T1, /) -> T0 | T1: ...
Ts0 = TypeVarTuple("Ts0")
T3 = TypeVar("T3", default=int)
# This should generate an error.
| ClassD |
python | kubernetes-client__python | kubernetes/client/models/admissionregistration_v1_service_reference.py | {
"start": 383,
"end": 6812
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'namespace': 'str',
'path': 'str',
'port': 'int'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace',
'path': 'path',
'port': 'port'
}
def __init__(self, name=None, namespace=None, path=None, port=None, local_vars_configuration=None): # noqa: E501
"""AdmissionregistrationV1ServiceReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._namespace = None
self._path = None
self._port = None
self.discriminator = None
self.name = name
self.namespace = namespace
if path is not None:
self.path = path
if port is not None:
self.port = port
@property
def name(self):
"""Gets the name of this AdmissionregistrationV1ServiceReference. # noqa: E501
`name` is the name of the service. Required # noqa: E501
:return: The name of this AdmissionregistrationV1ServiceReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AdmissionregistrationV1ServiceReference.
`name` is the name of the service. Required # noqa: E501
:param name: The name of this AdmissionregistrationV1ServiceReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this AdmissionregistrationV1ServiceReference. # noqa: E501
`namespace` is the namespace of the service. Required # noqa: E501
:return: The namespace of this AdmissionregistrationV1ServiceReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this AdmissionregistrationV1ServiceReference.
`namespace` is the namespace of the service. Required # noqa: E501
:param namespace: The namespace of this AdmissionregistrationV1ServiceReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
self._namespace = namespace
@property
def path(self):
"""Gets the path of this AdmissionregistrationV1ServiceReference. # noqa: E501
`path` is an optional URL path which will be sent in any request to this service. # noqa: E501
:return: The path of this AdmissionregistrationV1ServiceReference. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this AdmissionregistrationV1ServiceReference.
`path` is an optional URL path which will be sent in any request to this service. # noqa: E501
:param path: The path of this AdmissionregistrationV1ServiceReference. # noqa: E501
:type: str
"""
self._path = path
@property
def port(self):
"""Gets the port of this AdmissionregistrationV1ServiceReference. # noqa: E501
If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). # noqa: E501
:return: The port of this AdmissionregistrationV1ServiceReference. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this AdmissionregistrationV1ServiceReference.
If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). # noqa: E501
:param port: The port of this AdmissionregistrationV1ServiceReference. # noqa: E501
:type: int
"""
self._port = port
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdmissionregistrationV1ServiceReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AdmissionregistrationV1ServiceReference):
return True
return self.to_dict() != other.to_dict()
| AdmissionregistrationV1ServiceReference |
python | ray-project__ray | release/ray_release/tests/utils.py | {
"start": 163,
"end": 572
} | class ____(RuntimeError):
pass
def fail_always(*a, **kw):
raise UnitTestError()
def fail_once(result: Any):
class _Failer:
def __init__(self):
self.failed = False
def __call__(self, *args, **kwargs):
if not self.failed:
self.failed = True
raise UnitTestError()
return result
return _Failer()
| UnitTestError |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 4975,
"end": 5034
} | class ____(SiglipTextModelOutput):
pass
| Siglip2TextOutput |
python | langchain-ai__langchain | libs/cli/langchain_cli/integration_template/tests/unit_tests/test_tools.py | {
"start": 134,
"end": 897
} | class ____(ToolsUnitTests):
@property
def tool_constructor(self) -> Type[__ModuleName__Tool]:
return __ModuleName__Tool
@property
def tool_constructor_params(self) -> dict:
# If your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not have
`{"name", "id", "args"}` keys.
"""
return {"a": 2, "b": 3}
| TestParrotMultiplyToolUnit |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF053.py | {
"start": 1462,
"end": 1522
} | class ____(Generic[_B]):
class D[T](Generic[_B, T]): ...
| C |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 144297,
"end": 161709
} | class ____(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
output_modalities = ("audio",)
_keys_to_ignore_on_load_missing = ["speech_encoder"]
main_input_name = "input_ids"
_tied_weights_keys = {
"lm_head.weight": "shared.weight",
"text_encoder.embed_tokens.weight": "shared.weight",
"text_decoder.embed_tokens.weight": "shared.weight",
}
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.__init__ with SeamlessM4T->SeamlessM4Tv2
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4Tv2Encoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_encoder
def get_encoder(self):
return self.text_encoder
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_decoder
def get_decoder(self):
return self.text_decoder
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_input_embeddings
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.set_input_embeddings
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.forward with SeamlessM4T->SeamlessM4Tv2
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
# if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
logger.warning(
"This is the same forward method as `SeamlessM4Tv2ForTextToText`."
"It doesn't use the text-to-unit model `SeamlessM4Tv2TextToUnitForConditionalGeneration`."
"If you want to generate speech, use the `.generate` method."
)
encoder_outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
encoder_attention_mask = attention_mask
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.Tensor] = None,
return_intermediate_token_ids: Optional[bool] = None,
tgt_lang: Optional[str] = None,
speaker_id: Optional[int] = 0,
**kwargs,
) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
"""
Generates translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform
beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
speaker_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4Tv2GenerationOutput, tuple[Tensor]]`:
- If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
- If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
sequence_length)` and `waveform_lengths` which gives the length of each sample.
"""
batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
if tgt_lang is None:
raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
else:
# also accept __xxx__
tgt_lang = tgt_lang.replace("__", "")
for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(
f"""This model generation config doesn't have a `{key}` key which maps the target language
to the right token id. Make sure to load the right generation config."""
)
elif tgt_lang not in lang_code_to_id:
raise ValueError(
f"""`tgt_lang={tgt_lang}` is not supported by this model.
Please specify a `tgt_lang` in {",".join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports
more languages for text translation than for speech synthesis."""
)
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text["output_hidden_states"] = True
kwargs_text["return_dict_in_generate"] = True
kwargs_text["output_scores"] = True
text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
# overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text["decoder_input_ids"] = text_decoder_input_ids
# first generation
text_generation_output = super().generate(input_ids, **kwargs_text)
sequences = text_generation_output.sequences
# prepare second generation
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
if attention_mask is not None:
# repeat attention mask alongside batch dimension
attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
# repeat attention mask alongside batch dimension
encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
# get decoder last hidden state - must do a pass through the text decoder
t2u_input_embeds = self.text_decoder(
input_ids=sequences[:, :-1], # Manually trim the final EOS token
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
# Compute new attention mask
seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech["attention_mask"] = t2u_model_attention_mask
# REMOVE EOS and lang_id
t2u_input_ids = sequences[:, 2:-1]
# replace every other EOS
t2u_input_ids = torch.masked_fill(
t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id
)
# compute t2u_char_input_ids
t2u_subwords = self._indices_to_subwords(t2u_input_ids)
t2u_char_count_per_id = self._count_character_length_in_subword(
t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id
)
# Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode.
pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
t2u_char_input_ids = self._get_char_input_ids(
t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id
)
# second pass
t2u_output = self.t2u_model(
inputs_embeds=t2u_input_embeds,
char_input_ids=t2u_char_input_ids,
char_count_per_id=t2u_char_count_per_id,
**kwargs_speech,
)
t2u_logits = t2u_output[0]
padding_mask = t2u_output[1].bool()
# The text-to-unit model is non auto-regressive. We keep the ability to use sampling with temperature
temperature = kwargs_speech.get("temperature", None)
if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False):
unit_ids = t2u_logits.argmax(dim=-1)
else:
t2u_logits = t2u_logits / temperature
# apply softmax
probs = nn.functional.softmax(t2u_logits, dim=-1)
# reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size)
probs = probs.reshape((-1, probs.shape[2]))
# multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
output_unit_ids = unit_ids.detach().clone()
replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask)
# replace eos per pad
unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
# offset of control symbols
unit_ids = torch.where(
unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
speaker_id = torch.tensor([[speaker_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(
input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id
)
if return_intermediate_token_ids:
return SeamlessM4Tv2GenerationOutput(
waveform=waveform,
waveform_lengths=waveform_lengths,
sequences=sequences,
unit_sequences=output_unit_ids,
)
return waveform, waveform_lengths
@auto_docstring(
custom_intro="""
The speech-to-speech SeamlessM4Tv2 Model transformer which can be used for S2ST.
"""
)
| SeamlessM4Tv2ForTextToSpeech |
python | walkccc__LeetCode | solutions/220. Contains Duplicate III/220-2.py | {
"start": 0,
"end": 851
} | class ____:
def containsNearbyAlmostDuplicate(
self,
nums: list[int],
indexDiff: int,
valueDiff: int,
) -> bool:
if not nums or indexDiff <= 0 or valueDiff < 0:
return False
mn = min(nums)
diff = valueDiff + 1 # In case that `valueDiff` equals 0.
bucket = {}
def getKey(num: int) -> int:
return (num - mn) // diff
for i, num in enumerate(nums):
key = getKey(num)
if key in bucket: # the current bucket
return True
# the left adjacent bucket
if key - 1 in bucket and num - bucket[key - 1] < diff:
return True
# the right adjacent bucket
if key + 1 in bucket and bucket[key + 1] - num < diff:
return True
bucket[key] = num
if i >= indexDiff:
del bucket[getKey(nums[i - indexDiff])]
return False
| Solution |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 30759,
"end": 33936
} | class ____(ASTBaseBase):
def __init__(
self,
storage: str,
threadLocal: str,
inline: bool,
restrict: bool,
volatile: bool,
const: bool,
attrs: ASTAttributeList,
) -> None:
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclSpecsSimple):
return NotImplemented
return (
self.storage == other.storage
and self.threadLocal == other.threadLocal
and self.inline == other.inline
and self.restrict == other.restrict
and self.volatile == other.volatile
and self.const == other.const
and self.attrs == other.attrs
)
def __hash__(self) -> int:
return hash((
self.storage,
self.threadLocal,
self.inline,
self.restrict,
self.volatile,
self.const,
self.attrs,
))
def mergeWith(self, other: ASTDeclSpecsSimple) -> ASTDeclSpecsSimple:
if not other:
return self
return ASTDeclSpecsSimple(
self.storage or other.storage,
self.threadLocal or other.threadLocal,
self.inline or other.inline,
self.volatile or other.volatile,
self.const or other.const,
self.restrict or other.restrict,
self.attrs + other.attrs,
)
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
if len(self.attrs) != 0:
res.append(transform(self.attrs))
if self.storage:
res.append(self.storage)
if self.threadLocal:
res.append(self.threadLocal)
if self.inline:
res.append('inline')
if self.restrict:
res.append('restrict')
if self.volatile:
res.append('volatile')
if self.const:
res.append('const')
return ' '.join(res)
def describe_signature(self, modifiers: list[Node]) -> None:
def _add(modifiers: list[Node], text: str) -> None:
if len(modifiers) != 0:
modifiers.append(addnodes.desc_sig_space())
modifiers.append(addnodes.desc_sig_keyword(text, text))
if len(modifiers) != 0 and len(self.attrs) != 0:
modifiers.append(addnodes.desc_sig_space())
temp_node = nodes.TextElement()
self.attrs.describe_signature(temp_node)
modifiers.extend(temp_node.children)
if self.storage:
_add(modifiers, self.storage)
if self.threadLocal:
_add(modifiers, self.threadLocal)
if self.inline:
_add(modifiers, 'inline')
if self.restrict:
_add(modifiers, 'restrict')
if self.volatile:
_add(modifiers, 'volatile')
if self.const:
_add(modifiers, 'const')
| ASTDeclSpecsSimple |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 66326,
"end": 67470
} | class ____(DefinedFunction):
"""
Helper function to make the $\\mathrm{besseli}(nu, z)$
function tractable for the Gruntz algorithm.
"""
def _eval_aseries(self, n, args0, x, logx):
from sympy.functions.combinatorial.factorials import RisingFactorial
from sympy.series.order import Order
point = args0[1]
if point in [S.Infinity, S.NegativeInfinity]:
nu, z = self.args
l = [((RisingFactorial(Rational(2*nu - 1, 2), k)*RisingFactorial(
Rational(2*nu + 1, 2), k))/((2)**(k)*z**(Rational(2*k + 1, 2))*factorial(k))) for k in range(n)]
return sqrt(pi/(2))*(Add(*l)) + Order(1/z**(Rational(2*n + 1, 2)), x)
return super()._eval_aseries(n, args0, x, logx)
def _eval_rewrite_as_intractable(self, nu, z, **kwargs):
return exp(-z)*besseli(nu, z)
def _eval_nseries(self, x, n, logx, cdir=0):
x0 = self.args[0].limit(x, 0)
if x0.is_zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super()._eval_nseries(x, n, logx)
| _besseli |
python | getsentry__sentry | tests/sentry/relocation/tasks/test_process.py | {
"start": 94701,
"end": 100679
} | class ____(RelocationTaskTestCase):
def setUp(self) -> None:
RelocationTaskTestCase.setUp(self)
TransactionTestCase.setUp(self)
self.relocation.step = Relocation.Step.NOTIFYING.value
self.relocation.latest_task = OrderedTask.NOTIFYING_UNHIDE.name
self.relocation.want_usernames = ["admin@example.com", "member@example.com"]
self.relocation.save()
with open(IMPORT_JSON_FILE_PATH, "rb") as fp:
import_in_organization_scope(
fp,
flags=ImportFlags(
import_uuid=str(self.uuid),
hide_organizations=True,
merge_users=False,
overwrite_configs=False,
),
org_filter=set(self.relocation.want_org_slugs),
printer=Printer(),
)
self.imported_orgs = sorted(
RegionImportChunk.objects.get(
import_uuid=self.uuid, model="sentry.organization"
).inserted_identifiers.values()
)
assert len(self.imported_orgs) == 1
self.imported_users = ControlImportChunkReplica.objects.get(
import_uuid=self.uuid, model="sentry.user"
).inserted_map
assert len(self.imported_users) == 2
def test_success(
self,
notifying_owner_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
with patch.object(LostPasswordHash, "send_relocate_account_email") as mock_relocation_email:
notifying_users(self.uuid)
# Called once for each user imported, which is 2 for `fresh-install.json`
assert mock_relocation_email.call_count == 2
email_targets = [
mock_relocation_email.call_args_list[0][0][0].username,
mock_relocation_email.call_args_list[1][0][0].username,
]
assert sorted(mock_relocation_email.call_args_list[0][0][2]) == self.imported_orgs
assert sorted(mock_relocation_email.call_args_list[1][0][2]) == self.imported_orgs
assert "admin@example.com" in email_targets
assert "member@example.com" in email_targets
assert fake_message_builder.call_count == 0
assert notifying_owner_mock.call_count == 1
relocation: Relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.latest_unclaimed_emails_sent_at is not None
def test_success_ignore_manually_claimed_users(
self,
notifying_owner_mock: Mock,
fake_message_builder: Mock,
):
with assume_test_silo_mode(SiloMode.CONTROL):
admin: User = User.objects.get(id=self.imported_users["1"], email="admin@example.com")
admin.is_unclaimed = False
admin.save()
self.mock_message_builder(fake_message_builder)
with patch.object(LostPasswordHash, "send_relocate_account_email") as mock_relocation_email:
notifying_users(self.uuid)
# Called once for each user imported that has not been manually claimed. Since we
# imported 2 users in `fresh-install.json`, but then manually claimed one at the top of
# this test, only one user remains.
assert mock_relocation_email.call_count == 1
email_targets = [
mock_relocation_email.call_args_list[0][0][0].username,
]
assert sorted(mock_relocation_email.call_args_list[0][0][2]) == self.imported_orgs
assert "member@example.com" in email_targets
assert "admin@example.com" not in email_targets
assert fake_message_builder.call_count == 0
assert notifying_owner_mock.call_count == 1
relocation: Relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.latest_unclaimed_emails_sent_at is not None
def test_retry_if_attempts_left(
self,
notifying_owner_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
self.relocation.want_usernames = ["doesnotexist"]
self.relocation.save()
# An exception being raised will trigger a retry task.
with pytest.raises(Exception):
notifying_users(self.uuid)
assert fake_message_builder.call_count == 0
assert notifying_owner_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.IN_PROGRESS.value
assert relocation.latest_notified != Relocation.EmailKind.FAILED.value
assert not relocation.failure_reason
def test_fail_if_no_attempts_left(
self,
notifying_owner_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
self.relocation.latest_task = OrderedTask.NOTIFYING_USERS.name
self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES
self.relocation.want_usernames = ["doesnotexist"]
self.relocation.save()
with pytest.raises(Exception):
notifying_users(self.uuid)
assert fake_message_builder.call_count == 1
assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed"
fake_message_builder.return_value.send_async.assert_called_once_with(
to=[self.owner.email, self.superuser.email]
)
assert notifying_owner_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.FAILURE.value
assert relocation.latest_notified == Relocation.EmailKind.FAILED.value
assert relocation.failure_reason == ERR_NOTIFYING_INTERNAL
@patch("sentry.relocation.utils.MessageBuilder")
@patch("sentry.relocation.tasks.process.completed.apply_async")
| NotifyingUsersTest |
python | ansible__ansible | lib/ansible/galaxy/token.py | {
"start": 4107,
"end": 5890
} | class ____(object):
""" Class to storing and retrieving local galaxy token """
token_type = 'Token'
def __init__(self, token=None):
self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
# Done so the config file is only opened when set/get/save is called
self._config = None
self._token = token
@property
def config(self):
if self._config is None:
self._config = self._read()
# Prioritise the token passed into the constructor
if self._token:
self._config['token'] = None if self._token is NoTokenSentinel else self._token
return self._config
def _read(self):
action = 'Opened'
if not os.path.isfile(self.b_file):
# token file not found, create and chmod u+rw
open(self.b_file, 'w').close()
os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
action = 'Created'
with open(self.b_file, 'r') as f:
config = yaml_load(f)
display.vvv('%s %s' % (action, to_text(self.b_file)))
if config and not isinstance(config, dict):
display.vvv('Galaxy token file %s malformed, unable to read it' % to_text(self.b_file))
return {}
return config or {}
def set(self, token):
self._token = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.b_file, 'w') as f:
yaml_dump(self.config, f, default_flow_style=False)
def headers(self):
headers = {}
token = self.get()
if token:
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
| GalaxyToken |
python | spack__spack | lib/spack/spack/verify.py | {
"start": 5300,
"end": 6072
} | class ____:
def __init__(self):
self.errors = {}
def add_error(self, path, field):
self.errors[path] = self.errors.get(path, []) + [field]
def __add__(self, vr):
for path, fields in vr.errors.items():
self.errors[path] = self.errors.get(path, []) + fields
return self
def has_errors(self):
return bool(self.errors)
def json_string(self):
return sjson.dump(self.errors)
def __str__(self):
res = ""
for path, fields in self.errors.items():
res += "%s verification failed with error(s):\n" % path
for error in fields:
res += " %s\n" % error
if not res:
res += "No Errors"
return res
| VerificationResults |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 57686,
"end": 61315
} | class ____(PallasCallDMATest):
INTERPRET = True
def test_interpret_local_dma(self):
# We run this test in interpret mode to test semaphore counting.
# On a physical device the values update asynchronously so we cannot
# deterministically check the values.
def test_kernel(x_ref,
o_ref,
sem_out_ref,
copy_sem,
):
o_ref[...] = jnp.zeros_like(o_ref[...])
input_to_output_copy = pltpu.make_async_copy(
src_ref=x_ref.at[0:8],
dst_ref=o_ref.at[0:8],
sem=copy_sem.at[0],
)
input_to_output_copy.start()
sem_out_ref[0, :] = jnp.ones_like(
sem_out_ref[0, :]) * pltpu.semaphore_read(copy_sem.at[0])
input_to_output_copy.wait()
sem_out_ref[1, :] = jnp.ones_like(
sem_out_ref[0, :]) * pltpu.semaphore_read(copy_sem.at[0])
out_shape = (jax.ShapeDtypeStruct((16, 128), jnp.int32),
jax.ShapeDtypeStruct((2, 1), jnp.int32))
grid_spec = pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[
pl.BlockSpec(memory_space=pl.ANY),
],
scratch_shapes=(
[pltpu.SemaphoreType.DMA(2,)]
)
)
kernel = pl.pallas_call(
test_kernel,
out_shape=out_shape,
grid_spec=grid_spec,
interpret=True,
)
x = jax.random.randint(
jax.random.key(0), shape=(16, 128), minval=0, maxval=128)
result, semaphores = kernel(x)
np.testing.assert_array_equal(result[0:8], x[0:8])
np.testing.assert_array_equal(result[8:], jnp.zeros_like(result[8:]))
# Make sure semaphores have the correct value before and after DMA wait.
result_sem_pre_wait = semaphores[0, 0]
np.testing.assert_array_equal(result_sem_pre_wait, result[0:8].size)
result_sem_post_wait = semaphores[1, 0]
np.testing.assert_array_equal(result_sem_post_wait, 0)
def test_interpreter_semaphore_counting(self):
# We run this test in interpret mode because the kernel exits with
# non-zero values. In normal Pallas this would crash the kernel.
def test_kernel(o_ref,
sem_ref,
):
o_ref[...] = jnp.zeros_like(o_ref)
pltpu.semaphore_signal(sem_ref.at[0], 1)
pltpu.semaphore_signal(sem_ref.at[1], 2)
pltpu.semaphore_signal(sem_ref.at[2], 3)
pltpu.semaphore_signal(sem_ref.at[3], 4)
o_ref[0, 0] = pltpu.semaphore_read(sem_ref.at[0])
o_ref[1, 0] = pltpu.semaphore_read(sem_ref.at[1])
o_ref[2, 0] = pltpu.semaphore_read(sem_ref.at[2])
o_ref[3, 0] = pltpu.semaphore_read(sem_ref.at[3])
pltpu.semaphore_wait(sem_ref.at[0], 4)
pltpu.semaphore_wait(sem_ref.at[1], 3)
pltpu.semaphore_wait(sem_ref.at[2], 2)
pltpu.semaphore_wait(sem_ref.at[3], 1)
o_ref[4, 0] = pltpu.semaphore_read(sem_ref.at[0])
o_ref[5, 0] = pltpu.semaphore_read(sem_ref.at[1])
o_ref[6, 0] = pltpu.semaphore_read(sem_ref.at[2])
o_ref[7, 0] = pltpu.semaphore_read(sem_ref.at[3])
out_shape = jax.ShapeDtypeStruct((8, 1), jnp.int32)
grid_spec = pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
scratch_shapes=(
[pltpu.SemaphoreType.REGULAR(4,)]
)
)
results = pl.pallas_call(
test_kernel,
out_shape=out_shape,
grid_spec=grid_spec,
interpret=True,
)()
expected = jnp.array([1, 2, 3, 4, -3, -1, 1, 3]).reshape(out_shape.shape)
np.testing.assert_array_equal(results, expected)
| PallasCallDMAInterpretTest |
python | tensorflow__tensorflow | tensorflow/lite/python/lite.py | {
"start": 112468,
"end": 115610
} | class ____(TFLiteConverterBaseV1):
"""Converts the given frozen graph def into TensorFlow Lite model."""
def __init__(
self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None,
experimental_debug_info_func=None,
):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes (e.g., [("foo", [1, 16,
16, 3])]). Use only when graph cannot be loaded into TensorFlow and when
`input_tensors` and `output_tensors` are None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteFrozenGraphConverter, self).__init__(
experimental_debug_info_func
)
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._control_output_arrays = None
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
if input_tensors is not None and input_arrays_with_shape is not None:
logging.warning(
"input_arrays_with_shape will be ignored when both the "
"given input_tensors and input_arrays_with_shape are not "
"None."
)
if output_tensors is not None and output_arrays is not None:
logging.warning(
"output_arrays will be ignored when both the given "
"output_tensors and output_arrays are not None."
)
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format, either a TFLite Flatbuffer or
a Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
if not self._has_valid_tensors():
if not self._input_arrays_with_shape or not (
self._output_arrays or self._control_output_arrays
):
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays|control_output_arrays "
"must be defined."
)
return super(TFLiteFrozenGraphConverter, self).convert()
@_tf_export(v1=["lite.TFLiteConverter"])
| TFLiteFrozenGraphConverter |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 10348,
"end": 13676
} | class ____(fixtures.TestBase):
def test_union_no_change(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.union({})
is_(d2, d)
def test_merge_with_no_change(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.merge_with({}, None)
eq_(d2, {1: 2, 3: 4})
is_(d2, d)
def test_merge_with_dicts(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.merge_with({3: 5, 7: 12}, {9: 18, 15: 25})
eq_(d, {1: 2, 3: 4})
eq_(d2, {1: 2, 3: 5, 7: 12, 9: 18, 15: 25})
assert isinstance(d2, util.immutabledict)
d3 = d.merge_with({17: 42})
eq_(d3, {1: 2, 3: 4, 17: 42})
def test_merge_with_tuples(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.merge_with([(3, 5), (7, 12)], [(9, 18), (15, 25)])
eq_(d, {1: 2, 3: 4})
eq_(d2, {1: 2, 3: 5, 7: 12, 9: 18, 15: 25})
def test_union_dictionary(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.union({3: 5, 7: 12})
assert isinstance(d2, util.immutabledict)
eq_(d, {1: 2, 3: 4})
eq_(d2, {1: 2, 3: 5, 7: 12})
def _dont_test_union_kw(self):
d = util.immutabledict({"a": "b", "c": "d"})
d2 = d.union(e="f", g="h")
assert isinstance(d2, util.immutabledict)
eq_(d, {"a": "b", "c": "d"})
eq_(d2, {"a": "b", "c": "d", "e": "f", "g": "h"})
def test_union_tuples(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = d.union([(3, 5), (7, 12)])
eq_(d, {1: 2, 3: 4})
eq_(d2, {1: 2, 3: 5, 7: 12})
def test_keys(self):
d = util.immutabledict({1: 2, 3: 4})
eq_(set(d.keys()), {1, 3})
def test_values(self):
d = util.immutabledict({1: 2, 3: 4})
eq_(set(d.values()), {2, 4})
def test_items(self):
d = util.immutabledict({1: 2, 3: 4})
eq_(set(d.items()), {(1, 2), (3, 4)})
def test_contains(self):
d = util.immutabledict({1: 2, 3: 4})
assert 1 in d
assert "foo" not in d
def test_rich_compare(self):
d = util.immutabledict({1: 2, 3: 4})
d2 = util.immutabledict({1: 2, 3: 4})
d3 = util.immutabledict({5: 12})
d4 = {5: 12}
eq_(d, d2)
ne_(d, d3)
ne_(d, d4)
eq_(d3, d4)
def test_serialize(self):
d = util.immutabledict({1: 2, 3: 4})
for loads, dumps in picklers():
d2 = loads(dumps(d))
eq_(d2, {1: 2, 3: 4})
assert isinstance(d2, util.immutabledict)
def test_repr(self):
# this is used by the stub generator in alembic
i = util.immutabledict()
eq_(str(i), "immutabledict({})")
i2 = util.immutabledict({"a": 42, 42: "a"})
eq_(str(i2), "immutabledict({'a': 42, 42: 'a'})")
def test_pep584(self):
i = util.immutabledict({"a": 2})
with expect_raises_message(TypeError, "object is immutable"):
i |= {"b": 42}
eq_(i, {"a": 2})
i2 = i | {"x": 3}
eq_(i, {"a": 2})
eq_(i2, {"a": 2, "x": 3})
is_true(isinstance(i2, util.immutabledict))
i2 = {"x": 3} | i2
eq_(i, {"a": 2})
eq_(i2, {"a": 2, "x": 3})
is_true(isinstance(i2, util.immutabledict))
| ImmutableDictTest |
python | sqlalchemy__sqlalchemy | test/ext/test_horizontal_shard.py | {
"start": 27242,
"end": 28887
} | class ____(ShardTest, fixtures.MappedTest):
__only_on__ = "postgresql"
schema = "changeme"
def _init_dbs(self):
e1 = testing_engine("sqlite://")
with e1.connect() as conn:
for i in [1, 3]:
conn.exec_driver_sql(
'ATTACH DATABASE "shard%s_%s.db" AS shard%s'
% (i, provision.FOLLOWER_IDENT, i)
)
e2 = testing_engine()
with e2.begin() as conn:
for i in [2, 4]:
conn.exec_driver_sql(
"CREATE SCHEMA IF NOT EXISTS shard%s" % (i,)
)
db1 = e1.execution_options(schema_translate_map={"changeme": "shard1"})
db2 = e2.execution_options(schema_translate_map={"changeme": "shard2"})
db3 = e1.execution_options(schema_translate_map={"changeme": "shard3"})
db4 = e2.execution_options(schema_translate_map={"changeme": "shard4"})
self.sqlite_engine = e1
self.postgresql_engine = e2
return db1, db2, db3, db4
def teardown_test(self):
clear_mappers()
# the tests in this suite don't cleanly close out the Session
# at the moment so use the reaper to close all connections
testing_reaper.checkin_all()
for i in [1, 3]:
os.remove("shard%d_%s.db" % (i, provision.FOLLOWER_IDENT))
with self.postgresql_engine.begin() as conn:
self.tables_test_metadata.drop_all(conn)
for i in [2, 4]:
conn.exec_driver_sql("DROP SCHEMA shard%s CASCADE" % (i,))
self.postgresql_engine.dispose()
| MultipleDialectShardTest |
python | huggingface__transformers | tests/models/marian/test_tokenization_marian.py | {
"start": 1251,
"end": 8912
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "Helsinki-NLP/opus-mt-en-de"
tokenizer_class = MarianTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
save_dir = Path(cls.tmpdirname)
save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
tokenizer = MarianTokenizer.from_pretrained(cls.tmpdirname)
tokenizer.save_pretrained(cls.tmpdirname)
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs) -> MarianTokenizer:
pretrained_name = pretrained_name or cls.tmpdirname
return MarianTokenizer.from_pretrained(pretrained_name, **kwargs)
def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "</s>"
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "</s>")
self.assertEqual(vocab_keys[1], "<unk>")
self.assertEqual(vocab_keys[-1], "<pad>")
self.assertEqual(len(vocab_keys), 9)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 9)
def test_tokenizer_equivalence_en_de(self):
en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
self.assertIsInstance(batch, BatchEncoding)
expected = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(expected, batch.input_ids[0])
save_dir = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(save_dir)
contents = [x.name for x in Path(save_dir).glob("*")]
self.assertIn("source.spm", contents)
MarianTokenizer.from_pretrained(save_dir)
def test_outputs_not_longer_than_maxlen(self):
    """Truncation caps very long inputs at the 512-token model max length."""
    tokenizer = self.get_tokenizer()
    batch = tokenizer(
        ["I am a small frog" * 1000, "I am a small frog"],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    self.assertIsInstance(batch, BatchEncoding)
    self.assertEqual(batch.input_ids.shape, (2, 512))
def test_outputs_can_be_shorter(self):
    """Padding only extends to the longest sequence, not to the model max length."""
    tokenizer = self.get_tokenizer()
    short_batch = tokenizer(
        ["I am a tiny frog", "I am a small frog"], padding=True, return_tensors="pt"
    )
    self.assertIsInstance(short_batch, BatchEncoding)
    self.assertEqual(short_batch.input_ids.shape, (2, 6))
@slow
def test_tokenizer_integration(self):
    """Compare full encodings against pinned reference outputs for opus-mt-en-de.

    The expected ids/masks were generated once against the fixed revision
    below; 58100 is the pad id filling the shorter sequences.
    """
    expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # fmt: skip
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding,
        model_name="Helsinki-NLP/opus-mt-en-de",
        revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
        decode_kwargs={"use_source_tokenizer": True},
    )
def test_tokenizer_integration_separate_vocabs(self):
    """Source and target texts are encoded with their own (separate) vocabularies."""
    tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

    source_text = "Tämä on testi"
    target_text = "This is a test"

    src_ids = tokenizer(source_text).input_ids
    self.assertListEqual(src_ids, [76, 7, 2047, 2])

    target_ids = tokenizer(text_target=target_text).input_ids
    self.assertListEqual(target_ids, [69, 12, 11, 940, 2])

    # Decoding the target ids must reproduce the original target text.
    decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
    self.assertEqual(decoded, target_text)
def test_tokenizer_decode(self):
    """``decode`` inverts the encoding for a simple sentence."""
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
    source_text = "Hello World"
    ids = tokenizer(source_text)["input_ids"]
    self.assertEqual(source_text, tokenizer.decode(ids, skip_special_tokens=True))
def test_internal_consistency(self):
    """Inherited consistency check, disabled upstream; see skip reason."""
    self.skipTest("TODO: failing for v5")
| MarianTokenizationTest |
python | catalyst-team__catalyst | catalyst/contrib/losses/recsys.py | {
"start": 10120,
"end": 11901
class ____(ListwiseLoss):
    """Weighted Approximate-Rank Pairwise (WARP) loss function.

    It has been proposed in `WSABIE\: Scaling Up To Large Vocabulary Image Annotation`_ paper.

    .. _WSABIE\: Scaling Up To Large Vocabulary Image Annotation:
        https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37180.pdf

    WARP loss randomly sample output labels of a model, until it finds a pair
    which it knows are wrongly labelled and will then only apply an update to
    these two incorrectly labelled examples.

    Adapted from:
    https://github.com/gabrieltseng/datascience-projects/blob/master/misc/warp.py

    Args:
        max_num_trials: Number of attempts allowed to find a violating negative example.
            In practice it means that we optimize for ranks 1 to max_num_trials-1.

    Example:

    .. code-block:: python

        import torch
        from catalyst.contrib.losses import recsys

        outputs = torch.randn(5, 3, requires_grad=True)
        targets = torch.randn(5, 3, requires_grad=True)

        output = recsys.WARPLoss()(outputs, targets)
        output.backward()
    """

    def __init__(self, max_num_trials: Optional[int] = None):
        # The trial budget is forwarded untouched to the WARP autograd function;
        # None presumably means "no explicit cap" — semantics live in WARP.apply.
        super().__init__()
        self.max_num_trials = max_num_trials

    def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Forward propagation method for the WARP loss.

        Args:
            outputs: Iterable of tensors containing predictions for all items.
            targets: Iterable of tensors containing true labels for all items.

        Returns:
            computed loss
        """
        # Shape validation is inherited from ListwiseLoss; the actual loss and
        # its custom backward are implemented in the WARP autograd Function.
        self._assert_equal_size(outputs, targets)
        return WARP.apply(outputs, targets, self.max_num_trials)
| WARPLoss |
python | google__jax | tests/pallas/tpu_paged_attention_kernel_test.py | {
"start": 6001,
"end": 8863
class ____(jtu.JaxTestCase):
    """Checks the grouped-query-attention reference against precomputed cases."""

    @parameterized.product(
        dtype=(jnp.float32, jnp.bfloat16),
        case=(0, 1, 2, 3),
    )
    def test_grouped_query_attention(self, dtype: jnp.dtype, case: int):
        """Compare the reference GQA implementation with a fixed expected output."""
        # generate queries, kv pages, and seq_lens
        seq_lens, queries, k_pages, v_pages, expected = _generate_qkv(dtype, case)
        jax.debug.print("seq_lens: {seq_lens}", seq_lens=seq_lens)
        jax.debug.print("queries: {queries}", queries=queries)
        jax.debug.print("k_pages: {k_pages}", k_pages=k_pages)
        jax.debug.print("v_pages: {v_pages}", v_pages=v_pages)
        jax.debug.print("expected: {expected}", expected=expected)
        # calculate grouped query attention
        attention = util.grouped_query_attention_reference(
            queries, k_pages, v_pages, seq_lens
        )
        jax.debug.print("attention: {attention}", attention=attention)
        # compare the results; bfloat16 needs looser tolerances than float32
        atol, rtol = (3e-3, 5e-3) if dtype == jnp.bfloat16 else (2e-4, 2e-4)
        self.assertAllClose(attention, expected, atol=atol, rtol=rtol)
def _generate_random_qkv(
seq_lens,
page_size,
max_seq_len,
num_kv_heads,
num_q_heads,
head_dim,
prng_key,
dtype=jnp.float32,
are_kv_quantized=False,
):
assert max_seq_len % page_size == 0
pages_per_sequence = max_seq_len // page_size
batch_size = len(seq_lens)
total_pages = batch_size * pages_per_sequence
k1, k2, k3, k4 = jax.random.split(prng_key, 4)
k_pages = jax.random.normal(
k1, (num_kv_heads, total_pages, page_size, head_dim), dtype=dtype
)
v_pages = jax.random.normal(
k2, (num_kv_heads, total_pages, page_size, head_dim), dtype=dtype
)
if are_kv_quantized:
k_pages = quantization_utils.quantize_to_int8(k_pages)
v_pages = quantization_utils.quantize_to_int8(v_pages)
page_indices = jnp.arange(batch_size * pages_per_sequence, dtype=jnp.int32)
page_indices = jax.random.permutation(k3, page_indices, independent=True)
page_indices = page_indices.reshape(batch_size, pages_per_sequence)
q = jax.random.normal(k4, (batch_size, num_q_heads, head_dim), dtype=dtype)
return q, k_pages, v_pages, page_indices
def _reconstruct_kv(page_indices, pages):
    """Gather per-sequence pages back into dense ``(batch, heads, seq, dim)`` tensors."""
    if isinstance(pages, quantization_utils.QuantizedTensor):
        pages = quantization_utils.unquantize_from_int8(pages, dtype=jnp.float32)

    num_kv_heads, _, _, head_dim = pages.shape
    batch_size = page_indices.shape[0]

    def gather_one_sequence(indices):
        # Select this sequence's pages along the page axis.
        return jnp.take(pages, indices, 1)

    gathered = jax.vmap(gather_one_sequence)(page_indices)
    return gathered.reshape(batch_size, num_kv_heads, -1, head_dim)
def _megacore_enabled():
    """Whether the attached accelerator is a megacore TPU (v4 or v5p)."""
    device_kind = jax.devices()[0].device_kind
    return device_kind == "TPU v4" or jtu.is_device_tpu(version=5, variant="p")
@jtu.with_config(jax_numpy_dtype_promotion="standard")
| JaxGroupedQueryAttentionReferenceTest |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 9993,
"end": 79806
} | class ____(MultiProcessTestCase):
def _create_process_group_nccl(self, store, opts, device_id=None):
    """Init the default NCCL process group with ``opts`` and return it."""
    # create nccl processgroup with opts
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        pg_options=opts,
        device_id=device_id,
    )
    pg = c10d.distributed_c10d._get_default_group()
    return pg
def opts(self, high_priority_stream=False):
    """Build ``ProcessGroupNCCL`` options, optionally using high-priority streams."""
    pg_opts = c10d.ProcessGroupNCCL.Options()
    pg_opts.is_high_priority_stream = high_priority_stream
    return pg_opts
def setUp(self):
    """Spawn one process per rank and register expected abort return codes."""
    super().setUp()
    # These tests are expected to throw SIGABRT(6);
    # But if we are in Sandcastle, `skip_but_pass_in_sandcastle` would return 0.
    TEST_NAN_ASSERT_RETURN = (
        0
        if (IS_SANDCASTLE and not (TEST_MULTIGPU and CUDA_12_AND_ABOVE))
        else signal.SIGABRT
    )
    # NaN-assert tests deliberately abort the child process; tell the harness
    # which exit codes are expected for each parametrized variant.
    self.special_return_code_checks = {
        self.test_nan_assert_float16.__wrapped__: TEST_NAN_ASSERT_RETURN,
        self.test_nan_assert_float32.__wrapped__: TEST_NAN_ASSERT_RETURN,
        self.test_nan_assert_float64.__wrapped__: TEST_NAN_ASSERT_RETURN,
        self.test_nan_assert_bfloat16.__wrapped__: TEST_NAN_ASSERT_RETURN,
        self.test_nan_assert_float8_e4m3fn.__wrapped__: TEST_NAN_ASSERT_RETURN,
        self.test_nan_assert_float8_e5m2.__wrapped__: TEST_NAN_ASSERT_RETURN,
    }
    # TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
    # that use TORCH_NCCL_BLOCKING_WAIT will test it as expected.
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
    # self.num_gpus = torch.cuda.device_count()
    self._spawn_processes()
def tearDown(self):
    """Remove the rendezvous file; ignore if a test never created it."""
    super().tearDown()
    try:
        os.remove(self.file_name)
    except OSError:
        pass
@property
def world_size(self):
    # Number of ranks for these tests; defaults to 2 via the shared helper
    # (presumably env-overridable — see get_required_world_size).
    return get_required_world_size(self, 2)
@property
def rank_to_GPU(self):
    # return rank to GPU map
    return init_multigpu_helper(self.world_size, "nccl")
@property
def destroy_pg_upon_exit(self) -> bool:
    # This TestCase focuses on creation, destroy and abort of PG's. So it
    # does not need auto-destroy upon exit.
    return False
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 1 GPU")
@skip_if_lt_x_gpu(1)
def test_nccl_dist_backend_error(self):
    """A backend failure surfaces as DistBackendError (a DistError/RuntimeError)."""
    store = c10d.FileStore(self.file_name, self.world_size)
    self._create_process_group_nccl(store, self.opts())

    # Both rank 0 and 1 will use the same CUDA device resulting in ncclInvalidUsage
    with self.assertRaises(dist.DistBackendError) as cm:
        dist.broadcast(torch.tensor([1, 2, 3]).cuda(), 0)
    self.assertTrue(isinstance(cm.exception, dist.DistError))

    self.assertIsInstance(cm.exception, RuntimeError)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_abort_pg(self):
    """abort() from a side thread unblocks a rank stuck in a mismatched collective."""
    # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically
    # abort the process group.
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"

    store = c10d.FileStore(self.file_name, self.world_size)
    self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]

    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize state.
    dist.all_reduce(t)

    def abortpg():
        c10d.distributed_c10d._get_default_group()._get_backend(
            torch.device(device)
        ).abort()

    # Initialize DDP to ensure "destroy_process_group" will not call
    # ProcessGroupNCCL destructor since DDP holds a reference to process group.
    # Run a single iteration of DDP to initialize state.
    model = DistributedDataParallel(
        torch.nn.Linear(10, 10).to(device), device_ids=[device]
    )
    model(t).sum().backward()

    # Now simulate collective getting stuck and abort gets us unstuck
    if self.rank == 0:
        dist.all_reduce(t)

        # Schedule thread before we get stuck to abort pg.
        thread = threading.Thread(target=abortpg)
        thread.start()

        # We would get stuck here due to d2h if we didn't abort.
        t.cpu()

        thread.join()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("eager_init", [True, False])
def test_close_pg(self, eager_init: bool):
    """After destroy_process_group, collectives on the default PG must fail."""
    # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically
    # abort the process group.
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"

    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank % torch.cuda.device_count()}")
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        device_id=device if eager_init else None,
    )

    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize state.
    dist.all_reduce(t)

    # Destroy pg and validate pg is no longer valid
    dist.destroy_process_group()
    with self.assertRaises(ValueError):
        dist.all_reduce(t)
@requires_nccl()
@skip_if_rocm_multiprocess
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_restart_pg(self):
    """A process group can be destroyed and re-initialized with a fresh store."""
    # Note: restart test passes steadily only for blocking mode for now.
    # TODO: expand this test to non-blocking mode
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank % torch.cuda.device_count()}")

    # initialize pg for the first time
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
    )
    t0 = torch.rand(10, 10, device=device)
    # First allreduce to lazy initialize default pg
    dist.all_reduce(t0)
    torch.cuda.synchronize()
    # Destroy pg
    dist.destroy_process_group()

    # we need a new Store for the new PG, achieving it by adding prefix
    new_store = c10d.PrefixStore("2nd", store)

    # re-initialize pg
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=new_store,
    )
    t1 = torch.rand(5, 5, device=device)
    dist.all_reduce(t1)
    torch.cuda.synchronize()
    dist.destroy_process_group()

    # validate default pg is no longer valid
    with self.assertRaises(ValueError):
        dist.all_reduce(t1)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_cuda_event_cache_mthd_race(self):
    """Event-cache recycling must survive a launcher thread dying early."""
    # This unit test is to test the case when the collective is launched in
    # a side thread and the thread dies before the cache has been fully recycled.
    # More details can be found in this issue: https://github.com/pytorch/pytorch/issues/143470.
    # initiate collectives here

    def init_collective_task(t):
        dist.all_reduce(t)
        dist.all_reduce(t)
        dist.all_reduce(t)

    os.environ["TORCH_NCCL_CUDA_EVENT_CACHE"] = "1"
    store = c10d.FileStore(self.file_name, self.world_size)
    self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]

    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize state.
    dist.all_reduce(t)
    dist.all_reduce(t)
    dist.all_reduce(t)

    side_thread = threading.Thread(target=init_collective_task, args=(t,))
    side_thread.start()
    side_thread.join()
    torch.cuda.synchronize()

    # reset ENV
    os.environ["TORCH_NCCL_CUDA_EVENT_CACHE"] = "0"
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
    # skip for cu126 as well due to https://github.com/pytorch/pytorch/issues/153479
    not (TEST_MULTIGPU and CUDA_12_AND_ABOVE),
    "NCCL test requires 2+ GPUs and Device side assert could cause unexpected errors in lower versions of CUDA",
)
@parametrize(
    "type",
    [
        torch.float16,
        torch.float32,
        torch.float64,
        torch.bfloat16,
        torch.float8_e4m3fn,
        torch.float8_e5m2,
    ],
)
@skip_if_rocm_multiprocess
def test_nan_assert(self, type):
    """The NaN checker must abort the process when a NaN reaches a collective."""
    # Expecting a device-side error when NaN is detected
    os.environ["TORCH_NCCL_NAN_CHECK"] = "1"
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = self._create_process_group_nccl(store, self.opts())
    backend = pg._get_backend(torch.device("cuda"))
    device = self.rank_to_GPU[self.rank][0]
    # Cover different buffer sizes
    if type == torch.float64:
        size = (1024,)  # 1K elements
    elif type == torch.float32:
        size = (1024, 1024)  # 1M elements
    elif type == torch.float16:
        size = (1024, 1024, 1024)  # 1G elements
    else:
        size = (1,)  # 1 element

    # Note: currently we cannot fill values into a FP8 tensor, thus we
    # create the NaN tensor in float32 type and cast it to FP8
    if type == torch.float8_e4m3fn or type == torch.float8_e5m2:
        init_type = torch.float32
    else:
        init_type = type

    nan_tensor = torch.zeros(*size, dtype=init_type, device=device)
    # randomly pick an nan element
    index = tuple([random.randrange(size[i]) for i in range(len(size))])
    nan_tensor[index] = float("nan")
    if init_type != type:
        # Now cast to the targeted dtype
        nan_tensor = nan_tensor.to(type)

    output = torch.empty(self.world_size, *size, dtype=type, device=device)

    # confirm enable/disable flag works
    backend._set_enable_nan_check(False)
    # Note: using all-gather here bc some NCCL/SM version does not support
    # FP8 reduction
    # temporarily skip due to https://github.com/pytorch/pytorch/issues/153479
    # pg._allgather_base(output, nan_tensor)

    backend._set_enable_nan_check(True)
    try:
        pg._allgather_base(output, nan_tensor)
    except Exception:
        sys.exit(signal.SIGABRT)

    dist.destroy_process_group()

    # reset env
    os.environ["TORCH_NCCL_NAN_CHECK"] = "0"
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nan_rank_filter(self):
    """NaN checking must only inspect send buffers, never receive buffers."""
    # Putting NaN at recv buffer, program should not fail as NaN checker
    # should not check on receive buffer
    os.environ["TORCH_NCCL_NAN_CHECK"] = "1"
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank:d}")
    c10d.init_process_group(
        backend="nccl", store=store, rank=self.rank, world_size=self.world_size
    )
    t = torch.ones(3, 4, dtype=torch.bfloat16, device=device)
    if self.rank != 0:
        # Putting NaN at recv buffer
        t[1, 1] = float("nan")
    # Against broadcast
    c10d.broadcast(t, 0)
    # Against P2P
    if self.rank == 0:
        c10d.send(t, 1)
    elif self.rank == 1:
        c10d.recv(t, 0)
    c10d.destroy_process_group()
    # reset env
    os.environ["TORCH_NCCL_NAN_CHECK"] = "0"
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nan_check(self):
    """NaN checking enabled must not break collectives on NaN-free tensors."""
    # Not expecting an error, NaN check should not make legit code fail
    device = torch.device(f"cuda:{self.rank:d}")
    os.environ["TORCH_NCCL_NAN_CHECK"] = "1"
    store = c10d.FileStore(self.file_name, self.world_size)
    c10d.init_process_group(
        backend="nccl", store=store, rank=self.rank, world_size=self.world_size
    )
    x = torch.ones((10,), device=device) * self.rank
    t = torch.ones(3, 4, device=device)
    c10d.broadcast(x, src=0)
    c10d.all_reduce(t)
    c10d.barrier()
    c10d.destroy_process_group()
    # reset env
    os.environ["TORCH_NCCL_NAN_CHECK"] = "0"
def _helper_test_extra_cuda_context_by_nvml(self):
    """
    A helper for `test_extra_cuda_context`, if pynvml is available.
    pynvml provides python bindings for NVIDIA NVML functionalities.
    Here we are interested in: nvmlDeviceGetComputeRunningProcesses
    """
    import pynvml

    pynvml.nvmlInit()

    device = torch.device(f"cuda:{self.rank:d}")
    x = torch.empty((1,), device=device)
    work = c10d.all_reduce(x, async_op=True)

    # Wait for non-0 ranks to garbage collect Work -- this is the latest
    # point where extra CUDA context can be created
    if self.rank == 0:
        time.sleep(5)
    del work
    handle = pynvml.nvmlDeviceGetHandleByIndex(self.rank)
    processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    nprocs = len(processes)

    # A barrier for non-0 ranks
    c10d.all_reduce(x)
    torch.cuda.synchronize(device)
    c10d.destroy_process_group()
    # Only the local process should hold a context on this rank's device.
    self.assertLessEqual(
        nprocs,
        1,
        f"Found {nprocs} processes creating contexts on {device}, expecting 1 at most",
    )
def _helper_test_extra_cuda_context_by_memory(self):
    """
    A helper for `test_extra_cuda_context`, if pynvml is NOT available.
    If extra context is created, it would manifest into device 0's memory usage.
    """
    device = torch.device(f"cuda:{self.rank:d}")
    x = torch.empty((1,), device=device)

    # Rank 0 takes a snapshot before collective -- this snapshot should have
    # included rank 0's own context.
    if self.rank == 0:
        free, total = torch.cuda.mem_get_info(device)
        used_before = float(total - free)

    work = c10d.all_reduce(x, async_op=True)

    # Wait for non-0 ranks to garbage collect Work -- this is the latest
    # point where extra CUDA context can be created
    if self.rank == 0:
        time.sleep(5)
        free, total = torch.cuda.mem_get_info(device)
        used_after = float(total - free)
    del work

    # A barrier for non-0 ranks
    c10d.all_reduce(x)
    torch.cuda.synchronize(device)
    c10d.destroy_process_group()

    if self.rank == 0:
        # If non-0 rank creates a context on device 0, this assert would
        # fail because one context takes about 1 GB -- much more than the
        # tensor size created in this test.
        self.assertTrue(
            # Bump the heuristic from 1.5 to 1.7 due to
            # https://github.com/pytorch/pytorch/issues/153122
            used_after < used_before * 1.7,
            f"{device} used {used_after} bytes after collective, "
            f"70% more than the status before ({used_before} bytes). "
            f"Extra CUDA context may have been created.",
        )
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_extra_cuda_context(self):
    """Non-zero ranks must not create a spurious CUDA context on device 0."""
    # Check if non-0 ranks would create extra CUDA context on device 0
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank:d}")
    c10d.init_process_group(
        backend="nccl",
        store=store,
        rank=self.rank,
        world_size=self.world_size,
        device_id=device,
    )
    try:
        # Prefer the precise NVML-based check; fall back to a memory heuristic.
        self._helper_test_extra_cuda_context_by_nvml()
    except ModuleNotFoundError:
        self._helper_test_extra_cuda_context_by_memory()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_extra_cuda_context_sync_ops(self):
    """Synchronous collectives must not create extra contexts on peer devices."""
    # Loop a bunch of sync ops and see if any of them creates extra context.
    # Requires nvml to check number of processes resident on a device.
    try:
        import pynvml

        pynvml.nvmlInit()
    except Exception:
        self.skipTest("pynvml not available")

    # Check if non-0 ranks would create extra CUDA context on device 0
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank:d}")
    c10d.init_process_group(
        backend="nccl",
        store=store,
        rank=self.rank,
        world_size=self.world_size,
        device_id=device,
    )
    x = torch.empty((1,), device=device)
    y = torch.empty((self.world_size,), device=device)
    c10d.all_reduce(x)
    c10d.reduce(x, dst=0)
    c10d.broadcast(x, src=0)
    c10d.all_gather_into_tensor(y, x)
    c10d.reduce_scatter_tensor(x, y)
    c10d.barrier()
    # Wait a bit for remote processes to touch my device
    if self.rank == 0:
        time.sleep(5)
    handle = pynvml.nvmlDeviceGetHandleByIndex(self.rank)
    processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    nprocs = len(processes)
    # Don't exit till rank 0 is done with the nvml detection
    c10d.barrier()
    c10d.destroy_process_group()
    self.assertLessEqual(
        nprocs,
        1,
        f"Found {nprocs} processes creating contexts on {device}, expecting 1 at most",
    )
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_destruct_before_terminate_pg(self):
    """Dropping the last PG reference without destroy() must clean up safely."""
    # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically
    # abort the process group.
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]

    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize state.
    pg.allreduce(t)
    # force destruction before terminating comms, destructor would terminate comms
    del pg
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_abort_in_destroy_pg(self):
    """After destroy_process_group, the old PG handle must reject collectives."""
    # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically
    # abort the process group.
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]

    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize state.
    pg.allreduce(t)

    # Destroy pg and validate pg is NOT in working condition since
    # we have shutdown comms
    dist.destroy_process_group()
    with self.assertRaises(dist.DistBackendError):
        pg.allreduce([t])
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
    torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
)
def test_abort_in_destroy_multi_pgs(self):
    """destroy_process_group() must shut down the default PG and all subgroups."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]
    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize default PG's communicator.
    pg.allreduce(t).wait()
    new_pg1 = c10d.new_group([0, 1])
    new_pg2 = c10d.new_group([0, 1])
    t1 = torch.rand(10, 10, device=device)
    t2 = torch.rand(10, 10, device=device)
    new_pg1.allreduce(t1).wait()
    new_pg2.allreduce(t2).wait()
    backend = pg._get_backend(torch.device(device))
    # default PG's backend should have a split count of 0 because
    # it's not eager initialized
    self.assertEqual(backend.comm_split_count(), 0)
    # shutdown all NCCL PGs in one shot
    dist.destroy_process_group()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
    torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
)
def test_abort_in_destroy_mixed_empty_pgs(self):
    """Shutdown must handle a mix of initialized and never-used subgroups."""
    store = c10d.FileStore(self.file_name, self.world_size)
    pg = self._create_process_group_nccl(store, self.opts())
    device = self.rank_to_GPU[self.rank][0]
    t = torch.rand(10, 10, device=device)
    # First allreduce to initialize default PG's communicator.
    pg.allreduce(t).wait()
    # PG1 is an PG without comms initialized, since we don't call collective on it
    new_pg1 = c10d.new_group([0, 1])  # noqa: F841
    new_pg2 = c10d.new_group([0, 1])
    t2 = torch.rand(10, 10, device=device)

    new_pg2.allreduce(t2).wait()
    backend = pg._get_backend(torch.device(device))
    # default PG's backend should have a split count of 0
    self.assertEqual(backend.comm_split_count(), 0)
    # shutdown all NCCL PGs in one shot
    dist.destroy_process_group()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
    torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs"
)
def test_file_store_check(self):
    """The watchdog's FileStore.check() polling must not crash the process."""
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
    os.environ["TORCH_NCCL_ENABLE_MONITORING"] = "0"
    # FileStore check() would be executed
    os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
    os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "0"

    # self.file_name is created using "delete=False"
    # e.g., self.file_name = tempfile.NamedTemporaryFile(delete=False).name
    store = dist.FileStore(self.file_name, self.world_size)
    dist.init_process_group(
        backend="nccl", rank=self.rank, world_size=self.world_size, store=store
    )
    pg = dist.distributed_c10d._get_default_group()
    self.assertEqual(pg.rank(), self.rank)
    self.assertEqual(pg.size(), self.world_size)
    # give enough time for check() to be executed multiple times
    time.sleep(2)
    dist.destroy_process_group()
def _check_nccl_timeout(self, expected_timeout):
    """Assert the default PG's NCCL backend is configured with ``expected_timeout``."""
    default_pg = dist.distributed_c10d._get_default_group()
    backend = default_pg._get_backend(torch.device(f"cuda:{self.rank}"))
    self.assertEqual(backend.options._timeout, expected_timeout)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_CUDA, "No GPUs available, skipping test")
def test_init_process_group_nccl_timeout(self):
    """The ``timeout`` kwarg (or its default) must win over pg_options._timeout."""
    # nccl is handled 'specially' inside init_process_group and its options class is different from the options
    # used by the other PG's. There are specific edge cases for nccl that need to be tested.

    store = c10d.FileStore(self.file_name, self.world_size)
    base_opts = dict(
        backend="nccl", store=store, rank=self.rank, world_size=self.world_size
    )

    # test the default value coming from the `init_process_group` kwarg default
    dist.init_process_group(**base_opts)
    self._check_nccl_timeout(torch.distributed.constants.default_pg_nccl_timeout)
    dist.destroy_process_group()

    # test that `kwarg` timeout takes effect
    new_timeout = timedelta(seconds=123)
    dist.init_process_group(**base_opts, timeout=new_timeout)
    self._check_nccl_timeout(new_timeout)
    dist.destroy_process_group()

    # test that timeout value provided via `pg_options` kwarg is ignored and issues warning,
    # 'timeout' kwarg (or its kwdefault) taking precedence
    opts = dist.ProcessGroupNCCL.Options()
    opts._timeout = timedelta(seconds=123)
    with warnings.catch_warnings(record=True):
        dist.init_process_group(**base_opts, pg_options=opts)
        # TODO(whc) i verified that we are indeed emitting this warning, and i can't figure out why i can't catch it.
        # self.assertEqual(len(w), 1)
        # self.assertTrue("pg_options._timeout was specified" in str(w[-1].message))
    self._check_nccl_timeout(torch.distributed.constants.default_pg_nccl_timeout)
    dist.destroy_process_group()

    # test that timeout value provided via `pg_options` kwarg is ignored and issues warning,
    # 'timeout' kwarg taking precedence
    opts = dist.ProcessGroupNCCL.Options()
    opts._timeout = timedelta(seconds=123)
    dist.init_process_group(
        **base_opts, pg_options=opts, timeout=timedelta(seconds=1240)
    )
    self._check_nccl_timeout(timedelta(seconds=1240))
    dist.destroy_process_group()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("backend", [None, "nccl"])
def test_set_nccl_pg_timeout(self, backend):
    """The PG timeout can be changed after init, via backend and c10d APIs."""
    store = c10d.FileStore(self.file_name, self.world_size)
    opts = dict(
        backend=backend,
        store=store,
        rank=self.rank,
        world_size=self.world_size,
        timeout=timedelta(seconds=123),
    )
    dist.init_process_group(**opts)
    pg = dist.distributed_c10d._get_default_group()
    pg.allreduce(torch.rand(10).cuda(self.rank))
    self._check_nccl_timeout(timedelta(seconds=123))
    pg._get_backend(torch.device(f"cuda:{self.rank}"))._set_default_timeout(
        timedelta(seconds=23)
    )
    self._check_nccl_timeout(timedelta(seconds=23))
    pg.allreduce(torch.rand(10).cuda(self.rank))
    c10d.distributed_c10d._set_pg_timeout(timedelta(seconds=252), pg)
    self._check_nccl_timeout(timedelta(seconds=252))
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("backend", [None, "nccl"])
def test_extend_nccl_pg_timeout(self, backend):
    """Ephemeral timeout extensions apply on top of the default timeout.

    Rank 0 shrinks its default timeout and sleeps between collectives; the
    other ranks add ephemeral extensions and verify the effective timeout
    observed on each pending Work object.
    """
    torch.cuda.set_device(self.rank)
    store = c10d.FileStore(self.file_name, self.world_size)
    init_kwargs = dict(
        backend=backend,
        store=store,
        rank=self.rank,
        world_size=self.world_size,
        timeout=timedelta(seconds=123),
    )
    dist.init_process_group(**init_kwargs)
    pg = dist.distributed_c10d._get_default_group()
    backend_impl = pg._get_backend(torch.device(f"cuda:{self.rank}"))

    work = pg.allreduce(torch.rand(10).cuda(self.rank))
    self.assertTrue(backend_impl._verify_work_timeout(work, timedelta(seconds=123)))
    work.wait()

    backend_impl._set_default_timeout(timedelta(seconds=3))
    if self.rank == 0:
        # Ideally we want to sleep for a very long time, but this is not
        # feasible in unit test. So this is only a very tiny case.
        time.sleep(5)
        pg.allreduce(torch.rand(10).cuda(self.rank))
        time.sleep(5)
        pg.allreduce(torch.rand(5).cuda(self.rank))
        work = pg.allreduce(torch.rand(10).cuda(self.rank))
        self.assertTrue(backend_impl._verify_work_timeout(work, timedelta(seconds=3)))
        work.wait()
    else:
        dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(
            timedelta(seconds=10)
        )
        work1 = pg.allreduce(torch.rand(10).cuda(self.rank))
        work2 = pg.allreduce(torch.rand(5).cuda(self.rank))
        # 3s default + 10s ephemeral extension = 13s effective timeout.
        self.assertTrue(backend_impl._verify_work_timeout(work1, timedelta(seconds=13)))
        self.assertTrue(backend_impl._verify_work_timeout(work2, timedelta(seconds=13)))
        work1.wait()
        dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(
            timedelta(seconds=5)
        )
        # Since we are not block wait so use a sync here to leave enough time
        # for watchdog to reset first timeout extension.
        torch.cuda.synchronize(torch.device(f"cuda:{self.rank}"))
        work = pg.allreduce(torch.rand(10).cuda(self.rank))
        self.assertTrue(backend_impl._verify_work_timeout(work, timedelta(seconds=8)))
        work.wait()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("eager_init", [True, False])
def test_new_group(self, eager_init: bool):
    """A new group containing every world rank should take the transparent
    `ncclCommSplit` fast path; broadcasts must then work on both the
    default group and the newly created one.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    local_gpu = self.rank % torch.cuda.device_count()
    device = torch.device(f"cuda:{local_gpu}")
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        device_id=device if eager_init else None,
    )
    full_group = c10d.new_group()
    payload = torch.tensor([self.rank], device=device)
    dist.broadcast(payload, 0)
    dist.broadcast(payload, 0, group=full_group)
    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@skip_but_pass_in_sandcastle_if(
    torch.cuda.nccl.version()[-1] == "x", "NCCL test not for NCCLX"
)
def test_comm_split_subgroup(self):
    """With a device bound at init, creating a subgroup must split the
    communicator eagerly (exactly one split), and a later broadcast on
    that subgroup must not trigger any additional split.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
    nccl_backend = pg._get_backend(torch.device(device))
    payload = torch.full((1,), self.rank).cuda(device)
    baseline = payload.clone()
    solo_group = c10d.new_group([0])
    # Eager split: device_id was passed to init_process_group.
    self.assertEqual(nccl_backend.comm_split_count(), 1)
    if self.rank == 0:
        dist.broadcast(payload, 0, group=solo_group)
    # The collective must not have caused a second split.
    self.assertEqual(nccl_backend.comm_split_count(), 1)
    self.assertEqual(payload, baseline)
    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_comm_eager_init_subgroup(self):
    """Eagerly initializing a subgroup (device_id on new_group) must not
    force initialization of the lazily created default group.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    # No device_id here, so the default PG's comm stays uninitialized.
    pg = self._create_process_group_nccl(store, self.opts())
    lazy_backend = pg._get_backend(torch.device(device))
    self.assertEqual(lazy_backend._is_initialized(), False)
    # Subgroup is created eagerly because it is bound to a device.
    eager_subgroup = c10d.new_group([0, 1], device_id=device)
    payload = torch.full((1,), self.rank).cuda(device)
    dist.broadcast(payload, 0, group=eager_subgroup)
    # Default group must still be lazy after the subgroup collective.
    self.assertEqual(lazy_backend._is_initialized(), False)
    torch.cuda.synchronize()
    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_comm_split_group(self):
    """`c10d.split_group` on a subset of ranks: the child inherits the
    parent's backend options, gets a sequential group_desc, and each call
    increments the parent's comm split count by one.
    """
    # Test `ncclCommSplit` for smaller subgroups of the world when
    # we've passed a specific device_id to init_process_group.
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
    backend = pg._get_backend(torch.device(device))

    tensor = torch.full((1,), self.rank).cuda(device)
    # Create subgroup between ranks 0, 1
    subg_ranks = [0, 1]
    ng1 = c10d.split_group(pg, [subg_ranks])
    backend1 = ng1._get_backend(torch.device(device))

    # check basic options are the same between parent and child
    self.assertEqual(backend.options._timeout, backend1.options._timeout)
    self.assertEqual(
        backend.options.is_high_priority_stream,
        backend1.options.is_high_priority_stream,
    )
    self.assertEqual(ng1.group_desc, "default_pg:split:0")

    # comm split happens eagerly since device_id is passed to init_process_group.
    self.assertEqual(backend.comm_split_count(), 1)
    # dist.get_process_group_ranks returns the global ranks in the subgroup.
    self.assertEqual(
        dist.get_process_group_ranks(ng1),
        subg_ranks if self.rank in subg_ranks else [],
    )

    # dist.get_rank(ng1) is >= 0 only if this process is part of ng1;
    # otherwise, -1
    if dist.get_rank(ng1) >= 0:
        dist.broadcast(tensor, dist.get_global_rank(ng1, 0), group=ng1)
        self.assertEqual(tensor, torch.full((1,), 0))

    # A second split produces a new group with the next desc suffix and
    # bumps the split count again.
    ng2 = c10d.split_group(pg, [subg_ranks])
    self.assertEqual(ng2.group_desc, "default_pg:split:1")
    self.assertEqual(backend.comm_split_count(), 2)

    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_comm_split_group_mixed_backend(self):
    """Same split_group checks as test_comm_split_group, but with a mixed
    "cpu:gloo,cuda:nccl" process group — broadcasts are verified on both
    a CUDA tensor (NCCL path) and a CPU tensor (gloo path).
    """
    # Test `ncclCommSplit` for smaller subgroups of the world when
    # we've passed a specific device_id to init_process_group.
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    # pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
    # create nccl processgroup with opts
    c10d.init_process_group(
        "cpu:gloo,cuda:nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        pg_options=self.opts(),
        device_id=device,
    )
    pg = c10d.distributed_c10d._get_default_group()
    backend = pg._get_backend(torch.device(device))

    cuda_tensor = torch.full((1,), self.rank).cuda(device)
    cpu_tensor = torch.full((1,), self.rank)
    # Create subgroup between ranks 0, 1
    subg_ranks = [0, 1]
    ng1 = c10d.split_group(pg, [subg_ranks])
    backend1 = ng1._get_backend(torch.device(device))

    # check basic options are the same between parent and child
    self.assertEqual(backend.options._timeout, backend1.options._timeout)
    self.assertEqual(
        backend.options.is_high_priority_stream,
        backend1.options.is_high_priority_stream,
    )
    self.assertEqual(ng1.group_desc, "default_pg:split:0")

    # comm split happens eagerly since device_id is passed to init_process_group.
    self.assertEqual(backend.comm_split_count(), 1)
    # dist.get_process_group_ranks returns the global ranks in the subgroup.
    self.assertEqual(
        dist.get_process_group_ranks(ng1),
        subg_ranks if self.rank in subg_ranks else [],
    )

    # dist.get_rank(ng1) is >= 0 only if this process is part of ng1;
    # otherwise, -1
    if dist.get_rank(ng1) >= 0:
        # Exercise the NCCL (CUDA) path of the split group.
        dist.broadcast(cuda_tensor, dist.get_global_rank(ng1, 0), group=ng1)
        self.assertEqual(cuda_tensor, torch.full((1,), 0))
        # Exercise the gloo (CPU) path of the same split group.
        dist.broadcast(cpu_tensor, dist.get_global_rank(ng1, 0), group=ng1)
        self.assertEqual(cpu_tensor, torch.full((1,), 0))

    ng2 = c10d.split_group(pg, [subg_ranks])
    self.assertEqual(ng2.group_desc, "default_pg:split:1")
    self.assertEqual(backend.comm_split_count(), 2)

    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_non_blocking_init(self):
    """In non-blocking comm mode without a bound device, communicator
    creation stays lazy: neither the default group's first collective
    nor a new group's first collective counts as a comm split.
    """
    os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
    os.environ["TORCH_NCCL_NONBLOCKING_TIMEOUT"] = "100"
    store = c10d.FileStore(self.file_name, self.world_size)
    device = self.rank_to_GPU[self.rank][0]
    pg = self._create_process_group_nccl(store, self.opts())
    nccl_backend = pg._get_backend(torch.device(device))
    self.assertEqual(nccl_backend.comm_split_count(), 0)
    # First collective lazily initializes pg's communicator.
    reduce_input = torch.rand(10, 10, device=device)
    pg.allreduce(reduce_input).wait()
    second_group = c10d.new_group()
    # The new group's comm is not initialized until its own collectives run.
    self.assertEqual(nccl_backend.comm_split_count(), 0)
    bcast_input = torch.tensor([self.rank]).cuda(device)
    second_group.broadcast(bcast_input, 0).wait()
    # Even then, no split was performed in this lazy mode.
    self.assertEqual(nccl_backend.comm_split_count(), 0)
    dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_non_blocking_with_eager_init(self):
    """Non-blocking mode plus a bound device: the default group starts
    initializing eagerly, and a later full-world group is created via a
    single comm split.
    """
    os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
    os.environ["TORCH_NCCL_NONBLOCKING_TIMEOUT"] = "100"
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    # Binding the device triggers eager init mode.
    pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
    nccl_backend = pg._get_backend(torch.device(device))
    self.assertEqual(nccl_backend.comm_split_count(), 0)
    # Comm init has already started in the background; the allreduce is
    # only issued to the CUDA stream once init succeeds.
    reduce_input = torch.rand(10, 10, device=device)
    pg.allreduce(reduce_input).wait()
    follow_up_group = c10d.new_group()
    # The new group's comm is split from the parent eagerly.
    self.assertEqual(nccl_backend.comm_split_count(), 1)
    bcast_input = torch.tensor([self.rank]).cuda(device)
    follow_up_group.broadcast(bcast_input, 0).wait()
    # No further split after the collective.
    self.assertEqual(nccl_backend.comm_split_count(), 1)
    dist.destroy_process_group()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_non_blocking_p2p(self):
    """Point-to-point send/recv must work with non-blocking comms."""
    os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
    os.environ["TORCH_NCCL_NONBLOCKING_TIMEOUT"] = "100"
    store = c10d.FileStore(self.file_name, self.world_size)
    device = self.rank_to_GPU[self.rank][0]
    self._create_process_group_nccl(store, self.opts())
    # Both ranks build the same all-ones tensor for comparison.
    outbound = torch.ones(10, 10, device=device)
    if self.rank == 0:
        dist.send(outbound, 1)
    if self.rank == 1:
        inbound = torch.rand(10, 10, device=device)
        dist.recv(inbound, 0)
        self.assertEqual(outbound, inbound)
    dist.destroy_process_group()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
@parametrize("eager_init", [True, False])
def test_subgroup_p2p(self, eager_init: bool):
    """send/recv through a freshly created subgroup, for both lazy and
    eager default-group initialization.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank % torch.cuda.device_count()}")
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        device_id=device if eager_init else None,
    )
    outbound = torch.ones(10, 10, device=device)
    subgroup = dist.new_group()
    if self.rank == 0:
        dist.send(outbound, 1, group=subgroup)
    if self.rank == 1:
        inbound = torch.rand(10, 10, device=device)
        dist.recv(inbound, 0, group=subgroup)
        self.assertEqual(outbound, inbound)
    dist.destroy_process_group()
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_get_uid(self):
    """Process-group UIDs are assigned sequentially: the default group
    gets 0 and the next group created gets 1.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    default_pg = self._create_process_group_nccl(store, self.opts(), device_id=device)

    from torch.distributed.distributed_c10d import _get_process_group_uid

    self.assertEqual(_get_process_group_uid(default_pg), 0)
    second_pg = c10d.new_group([0, 1])
    self.assertEqual(_get_process_group_uid(second_pg), 1)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_set_process_group_desc(self):
    """group_desc is "default_pg" for the default group, the caller's
    string when group_desc= is given, and "undefined" otherwise.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    pg_default = self._create_process_group_nccl(
        store, self.opts(), device_id=device
    )
    self.assertEqual(pg_default.group_desc, "default_pg")

    named_group = c10d.new_group([0, 1], group_desc="test_purpose")
    self.assertEqual(named_group.group_desc, "test_purpose")

    unnamed_group = c10d.new_group([0, 1])
    self.assertEqual(unnamed_group.group_desc, "undefined")
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_basic(self):
    """Smoke test: shrink the default group by excluding rank 1."""
    excluded_ranks = [1]
    self._perform_shrink_test(excluded_ranks, "Basic shrink test")
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_validation(self):
    """Test input validation in shrink_group.

    Feeds shrink_group an empty exclusion list, duplicate ranks, and an
    out-of-bounds rank, asserting each raises. All ranks participate.
    """
    device, pg = self._setup_shrink_test("validation")

    def _test_invalid_input(ranks, description, expected_exception):
        """Helper to test invalid inputs: asserts shrink_group(ranks) raises."""
        try:
            c10d.shrink_group(ranks)
            self.fail(f"Expected {expected_exception.__name__} for {description}")
        except expected_exception:
            log_test_validation(self.rank, f"✓ {description}")
        except Exception:
            # An exception of a different type than expected: only accepted
            # when the caller explicitly said "any exception" (Exception).
            if expected_exception is Exception:  # Accept any exception
                log_test_validation(self.rank, f"✓ {description}")
            else:
                raise

    # Test cases
    _test_invalid_input([], "Empty exclusion list", ValueError)
    if self.world_size > 1:
        _test_invalid_input([0, 0, 1], "Duplicate ranks", Exception)
    _test_invalid_input([self.world_size + 1], "Out of bounds rank", Exception)

    log_test_success(self.rank, "All validation tests passed")
    dist.destroy_process_group()
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_backend_properties(self):
    """Test that backend properties are preserved after shrinking.

    Captures the parent backend's timeout and high-priority-stream flag
    before shrinking (by value, not by reference, since the parent backend
    is torn down) and asserts the shrunk group's backend matches.
    """
    test_name = "Backend Properties Test"
    ranks_to_exclude = [0]

    # Reuse _setup_shrink_test for complete setup (device, environment, and process group)
    device, pg = self._setup_shrink_test("backend_properties")

    # Follow _perform_shrink_test pattern from here
    log_test_info(self.rank, f"{test_name} (world_size={self.world_size})")
    is_excluded = self.rank in ranks_to_exclude
    log_test_info(
        self.rank,
        f"Excluding ranks: {ranks_to_exclude}, am_excluded: {is_excluded}",
    )

    # Store original backend property values (not references) before shrinking
    original_timeout = None
    original_high_priority = None
    if not is_excluded:
        original_backend = pg._get_backend(device)
        original_timeout = original_backend.options._timeout
        original_high_priority = original_backend.options.is_high_priority_stream
        log_test_info(
            self.rank,
            f"Storing original backend properties: timeout={original_timeout}, high_priority={original_high_priority}",
        )

    if is_excluded:
        log_test_info(
            self.rank,
            f"Excluded rank {self.rank} - setup complete, skipping shrink operation",
        )
        # Excluded ranks must still tear down, otherwise the test hangs.
        dist.destroy_process_group()  # hang without it
        return

    # Only non-excluded ranks proceed with shrink (same as _perform_shrink_test)
    log_test_info(self.rank, "Non-excluded rank calling shrink_group")
    shrunk_pg = c10d.shrink_group(ranks_to_exclude)

    # Reuse _validate_shrunk_group helper (same as _perform_shrink_test)
    expected_size = self.world_size - len(ranks_to_exclude)
    _ = self._validate_shrunk_group(shrunk_pg, expected_size, test_name)

    # Add custom backend properties validation
    new_backend = shrunk_pg._get_backend(device)
    log_test_info(self.rank, "Validating backend properties are preserved")
    new_timeout = new_backend.options._timeout
    new_high_priority = new_backend.options.is_high_priority_stream

    log_test_info(
        self.rank,
        f"Timeout comparison - original: {original_timeout}, new: {new_timeout}",
    )
    self.assertEqual(
        original_timeout, new_timeout, f"{test_name}: timeout not preserved"
    )
    log_test_info(
        self.rank,
        f"High priority stream comparison - original: {original_high_priority}, new: {new_high_priority}",
    )
    self.assertEqual(
        original_high_priority,
        new_high_priority,
        f"{test_name}: high_priority_stream not preserved",
    )
    log_test_validation(
        self.rank, f"{test_name}: Backend properties preserved successfully"
    )
    log_test_success(
        self.rank, f"{test_name} successful (shrink + backend validation)"
    )
    # Cleanup (same as _perform_shrink_test)
    dist.destroy_process_group()
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_multiple_comms(self):
    """Test shrink_group with multiple communicators and subgroup invalidation.

    First shrinks an explicit [0, 1] subgroup down to rank 0 only, then
    shrinks the default group by dropping the last rank and verifies a
    collective on the result.
    """
    device, pg = self._setup_shrink_test("multiple_comms")

    # Create subgroup [0, 1] and test shrinking it
    subgroup = c10d.new_group([0, 1])
    if self.rank <= 1:
        # Shrink subgroup: exclude rank 1
        if self.rank == 0:  # Only rank 0 remains
            shrunk_subgroup = c10d.shrink_group([1], group=subgroup)
            self.assertEqual(shrunk_subgroup.size(), 1)
            # Test communication on shrunk subgroup
            tensor = torch.full((1,), self.rank).cuda(device)
            c10d.all_reduce(tensor, group=shrunk_subgroup)
            self.assertEqual(tensor.item(), 0)  # Only rank 0
            log_test_success(self.rank, "Subgroup shrinking successful")

    dist.barrier()  # Sync before default group test

    # Shrink default group: exclude last rank
    ranks_to_exclude = [self.world_size - 1]
    if self.rank not in ranks_to_exclude:
        shrunk_default = c10d.shrink_group(ranks_to_exclude)
        expected_size = self.world_size - 1
        self.assertEqual(shrunk_default.size(), expected_size)

        # Test collective on shrunk default group
        tensor = torch.full((1,), self.rank).cuda(device)
        c10d.all_reduce(tensor, group=shrunk_default)
        expected_sum = sum(
            range(self.world_size - 1)
        )  # 0 + 1 + ... + (world_size-2)
        self.assertEqual(tensor.item(), expected_sum)
        log_test_success(self.rank, "Default group shrinking successful")

    # Note: After shrinking default group, the old subgroup is invalid
    # due to global rank reassignment
    dist.destroy_process_group()
def _test_shrink_group_with_flag(self, shrink_flag, flag_name, rank_to_exclude):
    """Run a single-rank-exclusion shrink test under the given flag.

    Skips (with a log line) when fewer than two processes are available.
    """
    if self.world_size < 2:
        log_test_info(self.rank, f"Skipping (needs ≥2 GPUs, got {self.world_size})")
        return

    excluded_ranks = [rank_to_exclude]
    log_test_info(self.rank, f"Using {flag_name} flag (value: {shrink_flag})")
    if flag_name == "NCCL_SHRINK_ABORT":
        log_test_info(
            self.rank,
            "ABORT flag will terminate ongoing operations before shrinking",
        )
    self._perform_shrink_test(
        excluded_ranks, f"{flag_name} flag test", shrink_flags=shrink_flag
    )
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_flags(self):
    """Exercise shrink_group with the NCCL_SHRINK_ABORT flag, excluding rank 1."""
    log_test_info(self.rank, "Testing NCCL_SHRINK_ABORT flag")
    self._test_shrink_group_with_flag(NCCL_SHRINK_ABORT, "NCCL_SHRINK_ABORT", 1)
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_nccl_config(self):
    """Verify that passing NCCL config via pg_options influences the shrunk group's backend options."""
    device, pg = self._setup_shrink_test("config")
    if self.rank == self.world_size - 1:
        # excluded rank should not call shrink_group
        dist.destroy_process_group()
        return

    # Prepare pg_options with NCCL config overrides
    # Capture parent's current backend options to ensure we can prove override vs inherit
    parent_backend = pg._get_backend(torch.device("cuda"))
    parent_hp = parent_backend.options.is_high_priority_stream
    parent_blocking = parent_backend.options.config.blocking

    # Choose overrides that differ from the parent (flip where possible)
    override_hp = not parent_hp
    if parent_blocking in (0, 1):
        override_blocking = 1 - parent_blocking
    else:
        # If undefined or unexpected, set to 1 which is a concrete value
        override_blocking = 1

    opts = c10d.ProcessGroupNCCL.Options()
    opts.is_high_priority_stream = override_hp
    opts.config.blocking = override_blocking

    shrunk_pg = c10d.shrink_group([self.world_size - 1], pg_options=opts)

    # Validate backend options propagated
    backend = shrunk_pg._get_backend(torch.device("cuda"))
    # is_high_priority_stream should exactly match our override and differ from parent
    self.assertEqual(backend.options.is_high_priority_stream, override_hp)
    self.assertNotEqual(backend.options.is_high_priority_stream, parent_hp)
    # config is a struct; check representative field and difference from parent when meaningful
    self.assertEqual(backend.options.config.blocking, override_blocking)
    if parent_blocking in (0, 1):
        self.assertNotEqual(backend.options.config.blocking, parent_blocking)

    dist.destroy_process_group()
@requires_nccl_shrink()
@requires_world_size(2)
def test_shrink_group_performance(self):
    """Test shrink_group performance and regression detection.

    Non-excluded ranks time shrink_group (must finish within 30s) and
    then validate plus time a collective on the shrunk group.
    """
    import time

    ranks_to_exclude = self._get_default_ranks_to_exclude()
    is_excluded = self.rank in ranks_to_exclude

    if not ranks_to_exclude:
        log_test_info(self.rank, "Skipping performance test (world_size=1)")
        return

    log_test_info(self.rank, f"Performance test with {self.world_size} processes")
    device, pg = self._setup_shrink_test("performance")

    if not is_excluded:
        log_test_info(self.rank, "Measuring shrink_group performance")
        start_time = time.time()
        shrunk_pg = c10d.shrink_group(ranks_to_exclude)
        end_time = time.time()

        elapsed_time = end_time - start_time
        log_test_info(self.rank, f"shrink_group: {elapsed_time:.3f}s")

        # Regression check: should complete within reasonable time
        self.assertLess(
            elapsed_time,
            30.0,
            f"shrink_group took {elapsed_time:.3f}s, possible regression",
        )

        # Test collective performance
        expected_size = self.world_size - len(ranks_to_exclude)
        self._validate_shrunk_group(shrunk_pg, expected_size, "performance")

        collective_start = time.time()
        _ = self._test_collective_on_shrunk_group(
            shrunk_pg, device, ranks_to_exclude, "performance"
        )
        collective_time = time.time() - collective_start
        log_test_info(self.rank, f"all_reduce: {collective_time:.3f}s")
        log_test_success(self.rank, "Performance test passed")
    else:
        # Excluded ranks set up but do not shrink; they just tear down below.
        log_test_info(self.rank, "Excluded rank - waiting")

    dist.destroy_process_group()
@requires_nccl_shrink()
@requires_world_size(4)
def test_shrink_group_multiple_exclusions(self):
    """Shrink while excluding several ranks at once (every even rank >= 2),
    so the exclusion count scales with the world size."""
    excluded_ranks = list(range(2, self.world_size, 2))
    self._perform_shrink_test(excluded_ranks, "Multiple exclusions test")
@requires_nccl_shrink()
@requires_world_size(3)
def test_shrink_group_multiple_iterations(self):
    """Test multiple shrink operations in sequence.

    Shrinks the default group once (dropping the last rank, or last two
    for world sizes >= 6), then shrinks the result again, and verifies an
    all_reduce on the final group sums the surviving global ranks.
    """
    log_test_info(
        self.rank,
        f"Starting test_shrink_group_multiple_iterations with world_size={self.world_size}",
    )
    store = c10d.FileStore(self.file_name, self.world_size)
    device = torch.device(f"cuda:{self.rank}")
    _ = self._create_process_group_nccl(store, self.opts(), device_id=device)

    # Track current effective world size throughout shrinking operations
    current_world_size = self.world_size
    log_test_info(self.rank, f"Initial world_size: {current_world_size}")

    # First shrinking: exclude the last rank(s)
    first_exclusion = [self.world_size - 1]
    if self.world_size >= 6:
        first_exclusion.append(
            self.world_size - 2
        )  # Exclude last two ranks for larger sizes
    log_test_info(self.rank, f"First shrinking: excluding ranks {first_exclusion}")

    if self.rank not in first_exclusion:
        # Only non-excluded ranks should call shrink_group
        first_pg = c10d.shrink_group(first_exclusion)
        self.assertIsNotNone(first_pg)

        # IMPORTANT: Update world size after first shrinking
        current_world_size = first_pg.size()
        expected_first_size = self.world_size - len(first_exclusion)
        log_test_info(
            self.rank,
            f"After first shrinking: world_size {self.world_size} -> {current_world_size}",
        )
        self.assertEqual(first_pg.size(), expected_first_size)

        # Second shrinking: exclude another rank from the remaining group
        # Choose a rank that's in the middle range
        if current_world_size >= 3:
            second_exclusion = [
                current_world_size - 1
            ]  # Exclude the new "last" rank
            # NOTE(review): second_exclusion uses the shrunk group's rank
            # numbering while self.rank is global; the comparison works
            # here only because the first exclusion removed trailing
            # ranks, leaving global and new ranks aligned — confirm if
            # the exclusion pattern ever changes.
            log_test_info(
                self.rank,
                f"Second shrinking from group of size {current_world_size}: excluding ranks {second_exclusion}",
            )
            if self.rank not in second_exclusion:
                # Only non-excluded ranks should call shrink_group for second iteration
                second_pg = c10d.shrink_group(second_exclusion, group=first_pg)
                self.assertIsNotNone(second_pg)

                # IMPORTANT: Update world size after second shrinking
                final_world_size = second_pg.size()
                expected_final_size = current_world_size - len(second_exclusion)
                log_test_info(
                    self.rank,
                    f"After second shrinking: world_size {current_world_size} -> {final_world_size}",
                )
                self.assertEqual(second_pg.size(), expected_final_size)

                # Test collective on final group
                tensor = torch.full((1,), self.rank).cuda(device)
                log_test_info(
                    self.rank,
                    f"Performing all_reduce on final group (size {final_world_size}) with tensor: {tensor.item()}",
                )
                c10d.all_reduce(tensor, group=second_pg)
                log_test_info(
                    self.rank,
                    f"Final all_reduce completed, result: {tensor.item()}",
                )

                # Calculate expected sum of remaining ranks
                all_excluded = set(first_exclusion + second_exclusion)
                remaining_ranks = [
                    r for r in range(self.world_size) if r not in all_excluded
                ]
                expected_sum = sum(remaining_ranks)
                log_test_info(
                    self.rank,
                    f"Remaining ranks: {remaining_ranks}, expected sum: {expected_sum}, actual: {tensor.item()}",
                )
                self.assertEqual(tensor.item(), expected_sum)
                log_test_info(self.rank, "Final verification passed")
            else:
                log_test_info(
                    self.rank,
                    "This rank excluded in second shrinking, not calling shrink_group",
                )
        else:
            log_test_info(
                self.rank, "Skipping second shrinking (remaining group too small)"
            )
    else:
        log_test_info(
            self.rank,
            "This rank excluded in first shrinking, not calling shrink_group",
        )

    log_test_info(self.rank, "Destroying process group")
    dist.destroy_process_group()
    log_test_info(self.rank, "test_shrink_group_multiple_iterations completed")
# Helper methods for optimized shrink group tests
def _setup_shrink_test(self, test_suffix, world_size=None, warmup=True):
    """Initialize a non-blocking NCCL process group for a shrink test.

    Uses a per-test FileStore (suffixed with *test_suffix*) so concurrent
    tests don't collide, optionally runs a warm-up all_reduce to force
    communicator creation, and returns ``(device, default_pg)``.
    """
    os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1"
    effective_world = world_size if world_size else self.world_size
    file_store = c10d.FileStore(self.file_name + f"_{test_suffix}", effective_world)
    cuda_device = torch.device(f"cuda:{self.rank}")
    c10d.init_process_group(
        "nccl",
        world_size=effective_world,
        rank=self.rank,
        store=file_store,
        pg_options=self.opts(),
        device_id=cuda_device,
    )
    default_pg = c10d.distributed_c10d._get_default_group()
    if warmup:
        # Eagerly create the communicator so later measurements aren't skewed.
        c10d.all_reduce(torch.ones(1).cuda(cuda_device), group=default_pg)
    return cuda_device, default_pg
def _validate_shrunk_group(self, shrunk_pg, expected_size, test_name=""):
    """Check size/rank invariants of a freshly shrunk process group.

    Asserts the group exists, has *expected_size* members, and assigned
    this process an in-bounds rank; returns the new rank.
    """
    self.assertIsNotNone(shrunk_pg, f"{test_name}: shrunk_pg should not be None")
    observed_size = shrunk_pg.size()
    self.assertEqual(
        observed_size, expected_size, f"{test_name}: group size mismatch"
    )
    rank_in_new_group = shrunk_pg.rank()
    within_bounds = 0 <= rank_in_new_group < expected_size
    self.assertTrue(
        within_bounds, f"{test_name}: invalid new rank {rank_in_new_group}"
    )
    summary = (
        f"{test_name}: world_size {self.world_size} -> {observed_size}, "
        f"rank {self.rank} -> {rank_in_new_group}"
    )
    log_test_info(self.rank, summary)
    return rank_in_new_group
def _test_collective_on_shrunk_group(
    self, shrunk_pg, device, ranks_to_exclude, test_name=""
):
    """Run an all_reduce on the shrunk group and verify the rank sum.

    Each member contributes its original global rank, so the reduced
    value must equal the sum of all non-excluded global ranks.
    """
    probe = torch.full((1,), self.rank, device=device, dtype=torch.float32)
    c10d.all_reduce(probe, group=shrunk_pg)
    result = probe.item()

    surviving_ranks = [
        r for r in range(self.world_size) if r not in ranks_to_exclude
    ]
    expected_sum = sum(surviving_ranks)
    self.assertEqual(
        result, expected_sum, f"{test_name}: collective result mismatch"
    )
    log_test_info(
        self.rank, f"{test_name}: collective passed ({result} == {expected_sum})"
    )
    return result
def _perform_shrink_test(
    self, ranks_to_exclude, test_name, shrink_flags=0, with_collective=True
):
    """Complete shrink test flow: setup, shrink, validate, test collective, cleanup.

    Consistent API: All ranks perform setup to initialize distributed environment.
    ONLY non-excluded ranks call shrink_group() for both default and non-default groups.
    Excluded ranks perform setup, then exit without calling shrink_group() or waiting.

    Returns the shrunk group on non-excluded ranks, None on excluded ranks.
    """
    log_test_info(self.rank, f"{test_name} (world_size={self.world_size})")
    is_excluded = self.rank in ranks_to_exclude
    log_test_info(
        self.rank,
        f"Excluding ranks: {ranks_to_exclude}, am_excluded: {is_excluded}",
    )

    # All ranks (including excluded ones) perform setup to initialize distributed environment
    device, pg = self._setup_shrink_test(test_name.lower().replace(" ", "_"))
    is_default_group = pg == c10d.distributed_c10d._get_default_group()

    if is_excluded:
        log_test_info(
            self.rank,
            f"Excluded rank {self.rank} - setup complete, skipping shrink operation",
        )
        if shrink_flags & NCCL_SHRINK_ABORT:
            # With ABORT semantics, the excluded rank aborts its backend
            # so any in-flight operations are terminated before teardown.
            log_test_info(self.rank, f"Using abort for excluded rank {self.rank}")
            pg._get_backend(torch.device(device)).abort()
            log_test_info(
                self.rank, f"cleanup resources for excluded rank {self.rank}"
            )
            dist.destroy_process_group()
            log_test_info(self.rank, f"Excluded rank {self.rank} - exit")
        else:
            log_test_info(
                self.rank, f"Using regular destroy for excluded rank {self.rank}"
            )
            dist.destroy_process_group()
        return None

    # Only non-excluded ranks proceed with shrink
    log_test_info(
        self.rank,
        f"Non-excluded rank calling shrink_group (default_group={is_default_group})",
    )
    shrunk_pg = c10d.shrink_group(ranks_to_exclude, shrink_flags=shrink_flags)
    log_test_info(
        self.rank,
        f"Non-excluded rank calling shrink_group (default_group={is_default_group}) done",
    )

    # Non-excluded ranks: validate and test the new group
    expected_size = self.world_size - len(ranks_to_exclude)
    _ = self._validate_shrunk_group(shrunk_pg, expected_size, test_name)

    if with_collective:
        _ = self._test_collective_on_shrunk_group(
            shrunk_pg, device, ranks_to_exclude, test_name
        )
        log_test_success(self.rank, f"{test_name} successful (shrink + collective)")
    else:
        log_test_success(self.rank, f"{test_name} successful (shrink only)")

    dist.destroy_process_group()
    return shrunk_pg
def _get_default_ranks_to_exclude(self):
"""Get default ranks to exclude based on world size."""
if self.world_size <= 1:
return []
return [self.world_size - 1] # Exclude last rank by default
@requires_nccl_shrink()
@requires_world_size(3)
def test_shrink_group_vs_abort_reinit_performance(self):
    """Compare performance of shrink_group vs traditional abort+reinit (simplified for reliability)."""
    # --- Test 1: full destroy + re-init, timed ---
    log_test_info(self.rank, "=== TEST 1: abort+reinit ===")
    device, pg1 = self._setup_shrink_test("_perf_reinit")
    torch.cuda.synchronize(device)

    # Test 1: Traditional abort + reinit
    start_time = time.perf_counter()
    dist.destroy_process_group()
    device, new_pg = self._setup_shrink_test("perf_shrink_test1")
    reinit_time = time.perf_counter() - start_time

    # Test collective with original rank values for fair comparison (non-blocking mode)
    test_tensor = torch.full((1,), self.rank, device=device, dtype=torch.float32)
    work = c10d.all_reduce(test_tensor, group=new_pg, async_op=True)
    work.wait()
    torch.cuda.synchronize(device)

    # Verify correctness: all ranks participate, so the sum is 0..world_size-1.
    expected_sum = sum(r for r in range(self.world_size))
    self.assertEqual(test_tensor.item(), expected_sum, "Reinit collective failed")
    log_test_info(self.rank, f"abort+reinit: {reinit_time:.4f}s")

    dist.destroy_process_group(new_pg)

    # Test 2: shrink_group with NCCL_SHRINK_ABORT
    log_test_info(self.rank, "=== TEST 2: shrink_group ===")
    ranks_to_exclude = [self.world_size - 1]
    is_excluded = self.rank in ranks_to_exclude
    log_test_info(
        self.rank,
        f"Excluding ranks: {ranks_to_exclude}, am_excluded: {is_excluded}",
    )

    device, pg1 = self._setup_shrink_test("perf_shrink_test2")  # Unique suffix
    shrink_time = 0
    if not is_excluded:
        torch.cuda.synchronize(device)  # Ensure accurate timing
        start_time = time.perf_counter()
        shrunk_pg = c10d.shrink_group(
            ranks_to_exclude, shrink_flags=NCCL_SHRINK_ABORT
        )
        # Include one collective in the timed region so communicator
        # creation cost is captured, mirroring the reinit measurement.
        c10d.all_reduce(torch.ones(1).cuda(device), group=shrunk_pg)
        shrink_time = time.perf_counter() - start_time

        # Test collective communication on shrunk group (non-blocking mode)
        test_tensor = torch.full(
            (1,), self.rank, device=device, dtype=torch.float32
        )
        work = c10d.all_reduce(test_tensor, group=shrunk_pg, async_op=True)
        work.wait()

        # Verify correctness: only surviving ranks contribute.
        expected_sum = sum(
            r for r in range(self.world_size) if r not in ranks_to_exclude
        )
        self.assertEqual(
            test_tensor.item(),
            expected_sum,
            "shrink_test: collective result mismatch",
        )
        torch.cuda.synchronize(device)  # Ensure operations complete
        log_test_info(self.rank, f"shrink_group: {shrink_time:.4f}s")
        dist.destroy_process_group()
    else:
        log_test_info(self.rank, "Excluded from shrink test - exiting immediately")
        dist.destroy_process_group()
        return

    # Performance analysis (only for participating ranks)
    if shrink_time > 0 and reinit_time > 0:
        speedup = reinit_time / shrink_time
        time_saved = reinit_time - shrink_time
        log_test_info(self.rank, "=== PERFORMANCE RESULTS ===")
        log_test_info(self.rank, f"shrink_group: {shrink_time:.4f}s")
        log_test_info(self.rank, f"abort+reinit: {reinit_time:.4f}s")
        log_test_info(self.rank, f"time_saved: {time_saved:+.4f}s")
        log_test_info(self.rank, f"speedup: {speedup:.2f}x")

        if speedup > 1.1:
            log_test_success(self.rank, "shrink_group significantly faster")
        elif speedup > 0.9:
            log_test_info(self.rank, "≈ comparable performance")
        else:
            log_test_warning(self.rank, "abort+reinit faster")

    log_test_info(self.rank, "Performance test completed")
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_deterministic_mode_no_break(self):
    """NCCL allreduce must not raise when PyTorch's deterministic mode
    is enabled.

    Fix: the original enabled deterministic mode process-wide and never
    restored it, leaking the setting into every test that runs after
    this one; restore the previous value in a ``finally`` block.
    """
    prev_mode = torch.are_deterministic_algorithms_enabled()
    torch.use_deterministic_algorithms(True)
    try:
        store = c10d.FileStore(self.file_name, self.world_size)
        device = torch.device(f"cuda:{self.rank}")
        self._create_process_group_nccl(store, self.opts(), device_id=device)
        tensor = torch.empty(10, 10, device=device)
        dist.all_reduce(tensor)
    finally:
        # Deterministic mode is process-global state; always restore it.
        torch.use_deterministic_algorithms(prev_mode)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_init_with_idx(self):
store = c10d.FileStore(self.file_name, self.world_size)
device_idx = self.rank
dist.init_process_group(
world_size=self.world_size,
rank=self.rank,
store=store,
device_id=device_idx,
)
dist.all_reduce(torch.empty(1, device=torch.device("cuda", device_idx)))
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
def test_block_current_stream(self):
store = c10d.FileStore(self.file_name, self.world_size)
device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
t = torch.rand(10, device=device)
work = pg.allreduce(t)
work.block_current_stream()
torch.cuda.current_stream().synchronize()
work.wait()
torch.cuda.synchronize()
| ProcessGroupNCCLGroupTest |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 23207,
"end": 23769
} | class ____(CommentedMapView, Set): # type: ignore
__slots__ = ()
@classmethod
def _from_iterable(self, it):
# type: (Any) -> Any
return set(it)
def __contains__(self, item):
# type: (Any) -> Any
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
# type: () -> Any
for key in self._mapping._keys():
yield (key, self._mapping[key])
| CommentedMapItemsView |
python | facebookresearch__faiss | contrib/clustering.py | {
"start": 7513,
"end": 12699
} | class ____(DatasetAssign):
"""Wrapper for a matrix that offers a function to assign the vectors
to centroids. All other implementations offer the same interface"""
def __init__(self, x):
assert x.__class__ == scipy.sparse.csr_matrix
self.x = x
self.squared_norms = np.array(x.power(2).sum(1))
def get_subset(self, indices):
return np.array(self.x[indices].todense())
def perform_search(self, centroids):
return sparse_assign_to_dense_blocks(
self.x, centroids, xq_norms=self.squared_norms)
def assign_to(self, centroids, weights=None):
D, I = self.perform_search(centroids)
I = I.ravel()
D = D.ravel()
n = self.x.shape[0]
if weights is None:
weights = np.ones(n, dtype='float32')
nc = len(centroids)
m = scipy.sparse.csc_matrix(
(weights, I, np.arange(n + 1)),
shape=(nc, n))
sum_per_centroid = np.array((m * self.x).todense())
return I, D, sum_per_centroid
def imbalance_factor(k, assign):
assign = np.ascontiguousarray(assign, dtype='int64')
return faiss.imbalance_factor(len(assign), k, faiss.swig_ptr(assign))
def check_if_torch(x):
if x.__class__ == np.ndarray:
return False
import torch
if isinstance(x, torch.Tensor):
return True
raise NotImplementedError(f"Unknown tensor type {type(x)}")
def reassign_centroids(hassign, centroids, rs=None):
""" reassign centroids when some of them collapse """
if rs is None:
rs = np.random
k, d = centroids.shape
nsplit = 0
is_torch = check_if_torch(centroids)
empty_cents = np.where(hassign == 0)[0]
if len(empty_cents) == 0:
return 0
if is_torch:
import torch
fac = torch.ones_like(centroids[0])
else:
fac = np.ones_like(centroids[0])
fac[::2] += 1 / 1024.
fac[1::2] -= 1 / 1024.
# this is a single pass unless there are more than k/2
# empty centroids
while len(empty_cents) > 0:
# choose which centroids to split (numpy)
probas = hassign.astype('float') - 1
probas[probas < 0] = 0
probas /= probas.sum()
nnz = (probas > 0).sum()
nreplace = min(nnz, empty_cents.size)
cjs = rs.choice(k, size=nreplace, p=probas)
for ci, cj in zip(empty_cents[:nreplace], cjs):
c = centroids[cj]
centroids[ci] = c * fac
centroids[cj] = c / fac
hassign[ci] = hassign[cj] // 2
hassign[cj] -= hassign[ci]
nsplit += 1
empty_cents = empty_cents[nreplace:]
return nsplit
def kmeans(k, data, niter=25, seed=1234, checkpoint=None, verbose=True,
return_stats=False):
"""Pure python kmeans implementation. Follows the Faiss C++ version
quite closely, but takes a DatasetAssign instead of a training data
matrix. Also redo is not implemented.
For the torch implementation, the centroids are tensors (possibly on GPU),
but the indices remain numpy on CPU.
"""
n, d = data.count(), data.dim()
log = print if verbose else print_nop
log(("Clustering %d points in %dD to %d clusters, " +
"%d iterations seed %d") % (n, d, k, niter, seed))
rs = np.random.RandomState(seed)
print("preproc...")
t0 = time.time()
# initialization
perm = rs.choice(n, size=k, replace=False)
centroids = data.get_subset(perm)
is_torch = check_if_torch(centroids)
iteration_stats = []
log(" done")
t_search_tot = 0
obj = []
for i in range(niter):
t0s = time.time()
log('assigning', end='\r', flush=True)
assign, D, sums = data.assign_to(centroids)
log('compute centroids', end='\r', flush=True)
t_search_tot += time.time() - t0s
err = D.sum()
if is_torch:
err = err.item()
obj.append(err)
hassign = np.bincount(assign, minlength=k)
fac = hassign.reshape(-1, 1).astype('float32')
fac[fac == 0] = 1 # quiet warning
if is_torch:
import torch
fac = torch.from_numpy(fac).to(sums.device)
centroids = sums / fac
nsplit = reassign_centroids(hassign, centroids, rs)
s = {
"obj": err,
"time": (time.time() - t0),
"time_search": t_search_tot,
"imbalance_factor": imbalance_factor(k, assign),
"nsplit": nsplit
}
log((" Iteration %d (%.2f s, search %.2f s): "
"objective=%g imbalance=%.3f nsplit=%d") % (
i, s["time"], s["time_search"],
err, s["imbalance_factor"],
nsplit)
)
iteration_stats.append(s)
if checkpoint is not None:
log('storing centroids in', checkpoint)
if is_torch:
import torch
torch.save(centroids, checkpoint)
else:
np.save(checkpoint, centroids)
if return_stats:
return centroids, iteration_stats
else:
return centroids
| DatasetAssignSparse |
python | django-guardian__django-guardian | example_project/posts/migrations/0002_auto_20190629_0848.py | {
"start": 92,
"end": 552
} | class ____(migrations.Migration):
dependencies = [
("posts", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="post",
name="content",
field=models.TextField(verbose_name="content"),
),
migrations.AlterField(
model_name="post",
name="title",
field=models.CharField(max_length=64, verbose_name="title"),
),
]
| Migration |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_twodim_base.py | {
"start": 5657,
"end": 5959
} | class ____(TestCase):
def test_basic(self):
a = get_mat(4)
# b = a[::-1, :]
b = np.flip(a, 0)
assert_equal(flipud(a), b)
a = [[0, 1, 2], [3, 4, 5]]
b = [[3, 4, 5], [0, 1, 2]]
assert_equal(flipud(a), b)
@instantiate_parametrized_tests
| TestFlipud |
python | lepture__authlib | authlib/oidc/core/userinfo.py | {
"start": 356,
"end": 4354
} | class ____:
"""OpenID Connect Core UserInfo Endpoint.
This endpoint returns information about a given user, as a JSON payload or as a JWT.
It must be subclassed and a few methods needs to be manually implemented::
class UserInfoEndpoint(oidc.core.UserInfoEndpoint):
def get_issuer(self):
return "https://auth.example"
def generate_user_info(self, user, scope):
return UserInfo(
sub=user.id,
name=user.name,
...
).filter(scope)
def resolve_private_key(self):
return server_private_jwk_set()
It is also needed to pass a :class:`~authlib.oauth2.rfc6749.ResourceProtector` instance
with a registered :class:`~authlib.oauth2.rfc6749.TokenValidator` at initialization,
so the access to the endpoint can be restricter to valid token bearers::
resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
server.register_endpoint(
UserInfoEndpoint(resource_protector=resource_protector)
)
And then you can plug the endpoint to your application::
@app.route("/oauth/userinfo", methods=["GET", "POST"])
def userinfo():
return server.create_endpoint_response("userinfo")
"""
ENDPOINT_NAME = "userinfo"
def __init__(
self,
server: Optional[AuthorizationServer] = None,
resource_protector: Optional[ResourceProtector] = None,
):
self.server = server
self.resource_protector = resource_protector
def create_endpoint_request(self, request: OAuth2Request):
return self.server.create_oauth2_request(request)
def __call__(self, request: OAuth2Request):
token = self.resource_protector.acquire_token("openid")
client = token.get_client()
user = token.get_user()
user_info = self.generate_user_info(user, token.scope)
if alg := client.client_metadata.get("userinfo_signed_response_alg"):
# If signed, the UserInfo Response MUST contain the Claims iss
# (issuer) and aud (audience) as members. The iss value MUST be
# the OP's Issuer Identifier URL. The aud value MUST be or
# include the RP's Client ID value.
user_info["iss"] = self.get_issuer()
user_info["aud"] = client.client_id
data = jwt.encode({"alg": alg}, user_info, self.resolve_private_key())
return 200, data, [("Content-Type", "application/jwt")]
return 200, user_info, default_json_headers
def generate_user_info(self, user, scope: str) -> UserInfo:
"""
Generate a :class:`~authlib.oidc.core.UserInfo` object for an user::
def generate_user_info(self, user, scope: str) -> UserInfo:
return UserInfo(
given_name=user.given_name,
family_name=user.last_name,
email=user.email,
...
).filter(scope)
This method must be implemented by developers.
"""
raise NotImplementedError()
def get_issuer(self) -> str:
"""The OP's Issuer Identifier URL.
The value is used to fill the ``iss`` claim that is mandatory in signed userinfo::
def get_issuer(self) -> str:
return "https://auth.example"
This method must be implemented by developers to support JWT userinfo.
"""
raise NotImplementedError()
def resolve_private_key(self):
"""Return the server JSON Web Key Set.
This is used to sign userinfo payloads::
def resolve_private_key(self):
return server_private_jwk_set()
This method must be implemented by developers to support JWT userinfo signing.
"""
return None # pragma: no cover
| UserInfoEndpoint |
python | zarr-developers__zarr-python | src/zarr/_cli/cli.py | {
"start": 576,
"end": 5765
} | class ____(str, Enum):
"""Limit CLI choice to only v3"""
v3 = "v3"
@app.command() # type: ignore[misc]
def migrate(
zarr_format: Annotated[
ZarrFormatV3,
typer.Argument(
help="Zarr format to migrate to. Currently only 'v3' is supported.",
),
],
input_store: Annotated[
str,
typer.Argument(
help=(
"Input Zarr to migrate - should be a store, path to directory in file system or name of zip file "
"e.g. 'data/example-1.zarr', 's3://example-bucket/example'..."
)
),
],
output_store: Annotated[
str | None,
typer.Argument(
help=(
"Output location to write generated metadata (no array data will be copied). If not provided, "
"metadata will be written to input_store. Should be a store, path to directory in file system "
"or name of zip file e.g. 'data/example-1.zarr', 's3://example-bucket/example'..."
)
),
] = None,
dry_run: Annotated[
bool,
typer.Option(
help="Enable a dry-run: files that would be converted are logged, but no new files are created or changed."
),
] = False,
overwrite: Annotated[
bool,
typer.Option(
help="Remove any existing v3 metadata at the output location, before migration starts."
),
] = False,
force: Annotated[
bool,
typer.Option(
help=(
"Only used when --overwrite is given. Allows v3 metadata to be removed when no valid "
"v2 metadata exists at the output location."
)
),
] = False,
remove_v2_metadata: Annotated[
bool,
typer.Option(
help="Remove v2 metadata (if any) from the output location, after migration is complete."
),
] = False,
) -> None:
"""Migrate all v2 metadata in a zarr hierarchy to v3. This will create a zarr.json file for each level
(every group / array). v2 files (.zarray, .zattrs etc.) will be left as-is.
"""
if dry_run:
_set_logging_level(verbose=True)
logger.info(
"Dry run enabled - no new files will be created or changed. Log of files that would be created on a real run:"
)
input_zarr_store = sync(make_store(input_store, mode="r+"))
if output_store is not None:
output_zarr_store = sync(make_store(output_store, mode="w-"))
write_store = output_zarr_store
else:
output_zarr_store = None
write_store = input_zarr_store
if overwrite:
sync(migrate_metadata.remove_metadata(write_store, 3, force=force, dry_run=dry_run))
migrate_metadata.migrate_v2_to_v3(
input_store=input_zarr_store, output_store=output_zarr_store, dry_run=dry_run
)
if remove_v2_metadata:
# There should always be valid v3 metadata at the output location after migration, so force=False
sync(migrate_metadata.remove_metadata(write_store, 2, force=False, dry_run=dry_run))
@app.command() # type: ignore[misc]
def remove_metadata(
zarr_format: Annotated[
ZarrFormat,
typer.Argument(help="Which format's metadata to remove - v2 or v3."),
],
store: Annotated[
str,
typer.Argument(
help="Store or path to directory in file system or name of zip file e.g. 'data/example-1.zarr', 's3://example-bucket/example'..."
),
],
force: Annotated[
bool,
typer.Option(
help=(
"Allow metadata to be deleted when no valid alternative exists e.g. allow deletion of v2 metadata, "
"when no v3 metadata is present."
)
),
] = False,
dry_run: Annotated[
bool,
typer.Option(
help="Enable a dry-run: files that would be deleted are logged, but no files are removed or changed."
),
] = False,
) -> None:
"""Remove all v2 (.zarray, .zattrs, .zgroup, .zmetadata) or v3 (zarr.json) metadata files from the given Zarr.
Note - this will remove metadata files at all levels of the hierarchy (every group and array).
"""
if dry_run:
_set_logging_level(verbose=True)
logger.info(
"Dry run enabled - no files will be deleted or changed. Log of files that would be deleted on a real run:"
)
input_zarr_store = sync(make_store(store, mode="r+"))
sync(
migrate_metadata.remove_metadata(
store=input_zarr_store,
zarr_format=cast(Literal[2, 3], int(zarr_format[1:])),
force=force,
dry_run=dry_run,
)
)
@app.callback() # type: ignore[misc]
def main(
verbose: Annotated[
bool,
typer.Option(
help="enable verbose logging - will print info about metadata files being deleted / saved."
),
] = False,
) -> None:
"""
See available commands below - access help for individual commands with zarr COMMAND --help.
"""
_set_logging_level(verbose=verbose)
if __name__ == "__main__":
app()
| ZarrFormatV3 |
python | kamyu104__LeetCode-Solutions | Python/matrix-diagonal-sum.py | {
"start": 29,
"end": 291
} | class ____(object):
def diagonalSum(self, mat):
"""
:type mat: List[List[int]]
:rtype: int
"""
return sum(mat[i][i]+mat[~i][i] for i in xrange(len(mat))) - (mat[len(mat)//2][len(mat)//2] if len(mat)%2 == 1 else 0)
| Solution |
python | django__django | tests/queries/models.py | {
"start": 18226,
"end": 18307
} | class ____(models.Model):
created = CreatedField(editable=False)
| ReturningModel |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/endpoints/test_extra_credentials.py | {
"start": 295,
"end": 2611
} | class ____(TestCase):
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
self.legacy = LegacyApplicationServer(self.validator)
self.backend = BackendApplicationServer(self.validator)
def test_post_authorization_request(self):
def save_code(client_id, token, request):
self.assertEqual('creds', request.extra)
def save_token(token, request):
self.assertEqual('creds', request.extra)
# Authorization code grant
self.validator.save_authorization_code.side_effect = save_code
self.web.create_authorization_response(
'https://i.b/auth?client_id=foo&response_type=code',
scopes=['foo'],
credentials={'extra': 'creds'})
# Implicit grant
self.validator.save_bearer_token.side_effect = save_token
self.mobile.create_authorization_response(
'https://i.b/auth?client_id=foo&response_type=token',
scopes=['foo'],
credentials={'extra': 'creds'})
def test_token_request(self):
def save_token(token, request):
self.assertIn('extra', token)
self.validator.save_bearer_token.side_effect = save_token
self.validator.authenticate_client.side_effect = self.set_client
# Authorization code grant
self.web.create_token_response('https://i.b/token',
body='grant_type=authorization_code&code=foo',
credentials={'extra': 'creds'})
# Password credentials grant
self.legacy.create_token_response('https://i.b/token',
body='grant_type=password&username=foo&password=bar',
credentials={'extra': 'creds'})
# Client credentials grant
self.backend.create_token_response('https://i.b/token',
body='grant_type=client_credentials',
credentials={'extra': 'creds'})
| ExtraCredentialsTest |
python | PyCQA__pydocstyle | src/tests/test_decorators.py | {
"start": 185,
"end": 5499
} | class ____:
"""Check parsing of Python source code."""
def test_parse_class_single_decorator(self):
"""Class decorator is recorded in class instance."""
code = textwrap.dedent("""\
@first_decorator
class Foo:
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
decorators = module.children[0].decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
def test_parse_class_decorators(self):
"""Class decorators are accumulated together with their arguments."""
code = textwrap.dedent("""\
@first_decorator
@second.decorator(argument)
@third.multi.line(
decorator,
key=value,
)
class Foo:
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
defined_class = module.children[0]
decorators = defined_class.decorators
assert 3 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
assert 'second.decorator' == decorators[1].name
assert 'argument' == decorators[1].arguments
assert 'third.multi.line' == decorators[2].name
assert 'decorator,key=value,' == decorators[2].arguments
def test_parse_class_nested_decorator(self):
"""Class decorator is recorded even for nested classes."""
code = textwrap.dedent("""\
@parent_decorator
class Foo:
pass
@first_decorator
class NestedClass:
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
nested_class = module.children[0].children[0]
decorators = nested_class.decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
def test_parse_method_single_decorator(self):
"""Method decorators are accumulated."""
code = textwrap.dedent("""\
class Foo:
@first_decorator
def method(self):
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
defined_class = module.children[0]
decorators = defined_class.children[0].decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
def test_parse_method_decorators(self):
"""Multiple method decorators are accumulated along with their args."""
code = textwrap.dedent("""\
class Foo:
@first_decorator
@second.decorator(argument)
@third.multi.line(
decorator,
key=value,
)
def method(self):
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
defined_class = module.children[0]
decorators = defined_class.children[0].decorators
assert 3 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
assert 'second.decorator' == decorators[1].name
assert 'argument' == decorators[1].arguments
assert 'third.multi.line' == decorators[2].name
assert 'decorator,key=value,' == decorators[2].arguments
def test_parse_function_decorator(self):
"""A function decorator is also accumulated."""
code = textwrap.dedent("""\
@first_decorator
def some_method(self):
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
decorators = module.children[0].decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
def test_parse_async_function_decorator(self):
"""Decorators for async functions are also accumulated."""
code = textwrap.dedent("""\
@first_decorator
async def some_method(self):
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
decorators = module.children[0].decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
def test_parse_method_nested_decorator(self):
"""Method decorators are accumulated for nested methods."""
code = textwrap.dedent("""\
class Foo:
@parent_decorator
def method(self):
@first_decorator
def nested_method(arg):
pass
""")
module = checker.parse(io.StringIO(code), 'dummy.py')
defined_class = module.children[0]
decorators = defined_class.children[0].children[0].decorators
assert 1 == len(decorators)
assert 'first_decorator' == decorators[0].name
assert '' == decorators[0].arguments
| TestParser |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/plugins.py | {
"start": 1520,
"end": 1779
} | class ____(BaseModel):
"""Serializer for AppBuilder View responses."""
model_config = ConfigDict(extra="allow")
name: str | None = None
category: str | None = None
view: str | None = None
label: str | None = None
| AppBuilderViewResponse |
python | scipy__scipy | scipy/special/tests/test_cdflib.py | {
"start": 18457,
"end": 24823
} | class ____:
# Reference values computed with mpmath with the following script
# Formula from:
# Lenth, Russell V (1989). "Algorithm AS 243: Cumulative Distribution Function
# of the Non-central t Distribution". Journal of the Royal Statistical Society,
# Series C. 38 (1): 185-189
#
# Warning: may take a long time to run
#
# from mpmath import mp
# mp.dps = 400
# def nct_cdf(df, nc, x):
# df, nc, x = map(mp.mpf, (df, nc, x))
# def f(df, nc, x):
# phi = mp.ncdf(-nc)
# y = x * x / (x * x + df)
# constant = mp.exp(-nc * nc / 2.)
# def term(j):
# intermediate = constant * (nc *nc / 2.)**j
# p = intermediate/mp.factorial(j)
# q = nc / (mp.sqrt(2.) * mp.gamma(j + 1.5)) * intermediate
# first_beta_term = mp.betainc(j + 0.5, df/2., x2=y,
# regularized=True)
# second_beta_term = mp.betainc(j + mp.one, df/2., x2=y,
# regularized=True)
# return p * first_beta_term + q * second_beta_term
# sum_term = mp.nsum(term, [0, mp.inf])
# f = phi + 0.5 * sum_term
# return f
# if x >= 0:
# result = f(df, nc, x)
# else:
# result = mp.one - f(df, -nc, x)
# return float(result)
@pytest.mark.parametrize("df, nc, x, expected_cdf", [
(0.98, -3.8, 0.0015, 0.9999279987514815),
(0.98, -3.8, 0.15, 0.9999528361700505),
(0.98, -3.8, 1.5, 0.9999908823016942),
(0.98, -3.8, 15, 0.9999990264591945),
(0.98, 0.38, 0.0015, 0.35241533122693),
(0.98, 0.38, 0.15, 0.39749697267146983),
(0.98, 0.38, 1.5, 0.716862963488558),
(0.98, 0.38, 15, 0.9656246449257494),
(0.98, 3.8, 0.0015, 7.26973354942293e-05),
(0.98, 3.8, 0.15, 0.00012416481147589105),
(0.98, 3.8, 1.5, 0.035388035775454095),
(0.98, 3.8, 15, 0.7954826975430583),
(0.98, 38, 0.0015, 3.02106943e-316),
(0.98, 38, 0.15, 6.069970616996603e-309),
(0.98, 38, 1.5, 2.591995360483094e-97),
(0.98, 38, 15, 0.011927265886910935),
(9.8, -3.8, 0.0015, 0.9999280776192786),
(9.8, -3.8, 0.15, 0.9999599410685442),
(9.8, -3.8, 1.5, 0.9999997432394788),
(9.8, -3.8, 15, 0.9999999999999984),
(9.8, 0.38, 0.0015, 0.3525155979107491),
(9.8, 0.38, 0.15, 0.40763120140379194),
(9.8, 0.38, 1.5, 0.8476794017024651),
(9.8, 0.38, 15, 0.9999999297116268),
(9.8, 3.8, 0.0015, 7.277620328149153e-05),
(9.8, 3.8, 0.15, 0.00013024802220900652),
(9.8, 3.8, 1.5, 0.013477432800072933),
(9.8, 3.8, 15, 0.999850151230648),
(9.8, 38, 0.0015, 3.05066095e-316),
(9.8, 38, 0.15, 1.79065514676e-313),
(9.8, 38, 1.5, 2.0935940165900746e-249),
(9.8, 38, 15, 2.252076291604796e-09),
(98, -3.8, 0.0015, 0.9999280875149109),
(98, -3.8, 0.15, 0.9999608250170452),
(98, -3.8, 1.5, 0.9999999304757682),
(98, -3.8, 15, 1.0),
(98, 0.38, 0.0015, 0.35252817848596313),
(98, 0.38, 0.15, 0.40890253001794846),
(98, 0.38, 1.5, 0.8664672830006552),
(98, 0.38, 15, 1.0),
(98, 3.8, 0.0015, 7.278609891281275e-05),
(98, 3.8, 0.15, 0.0001310318674827004),
(98, 3.8, 1.5, 0.010990879189991727),
(98, 3.8, 15, 0.9999999999999989),
(98, 38, 0.0015, 3.05437385e-316),
(98, 38, 0.15, 9.1668336166e-314),
(98, 38, 1.5, 1.8085884236563926e-288),
(98, 38, 15, 2.7740532792035907e-50),
(980, -3.8, 0.0015, 0.9999280885188965),
(980, -3.8, 0.15, 0.9999609144559273),
(980, -3.8, 1.5, 0.9999999410050979),
(980, -3.8, 15, 1.0),
(980, 0.38, 0.0015, 0.3525294548792812),
(980, 0.38, 0.15, 0.4090315324657382),
(980, 0.38, 1.5, 0.8684247068517293),
(980, 0.38, 15, 1.0),
(980, 3.8, 0.0015, 7.278710289828983e-05),
(980, 3.8, 0.15, 0.00013111131667906573),
(980, 3.8, 1.5, 0.010750678886113882),
(980, 3.8, 15, 1.0),
(980, 38, 0.0015, 3.0547506e-316),
(980, 38, 0.15, 8.6191646313e-314),
# revisit when boost1.90 is released,
# see https://github.com/boostorg/math/issues/1308
pytest.param(980, 38, 1.5, 1.1824454111413493e-291,
marks=pytest.mark.xfail(
reason="Bug in underlying Boost math implementation")),
(980, 38, 15, 5.407535300713606e-105)
])
def test_gh19896(self, df, nc, x, expected_cdf):
# test that gh-19896 is resolved.
# Originally this was a regression test that used the old Fortran results
# as a reference. The Fortran results were not accurate, so the reference
# values were recomputed with mpmath.
nctdtr_result = sp.nctdtr(df, nc, x)
assert_allclose(nctdtr_result, expected_cdf, rtol=1e-13, atol=1e-303)
def test_nctdtr_gh8344(self):
# test that gh-8344 is resolved.
df, nc, x = 3000, 3, 0.1
expected = 0.0018657780826323328
assert_allclose(sp.nctdtr(df, nc, x), expected, rtol=1e-14)
@pytest.mark.parametrize(
"df, nc, x, expected, rtol",
# revisit tolerances when boost1.90 is released,
# see https://github.com/boostorg/math/issues/1308
[[3., 5., -2., 1.5645373999149622e-09, 2e-8],
[1000., 10., 1., 1.1493552133826623e-19, 1e-13],
[1e-5, -6., 2., 0.9999999990135003, 1e-13],
[10., 20., 0.15, 6.426530505957303e-88, 1e-13],
[1., 1., np.inf, 1.0, 0.0],
[1., 1., -np.inf, 0.0, 0.0]
]
)
def test_nctdtr_accuracy(self, df, nc, x, expected, rtol):
assert_allclose(sp.nctdtr(df, nc, x), expected, rtol=rtol)
@pytest.mark.parametrize("df, nc, x, expected_cdf", [
(0.98, 38, 1.5, 2.591995360483094e-97),
(3000, 3, 0.1, 0.0018657780826323328),
(0.98, -3.8, 15, 0.9999990264591945),
(9.8, 38, 15, 2.252076291604796e-09),
])
def test_nctdtrit(self, df, nc, x, expected_cdf):
assert_allclose(sp.nctdtrit(df, nc, expected_cdf), x, rtol=1e-10)
| TestNoncentralTFunctions |
python | sphinx-doc__sphinx | sphinx/errors.py | {
"start": 2437,
"end": 2567
} | class ____(SphinxError):
"""Incompatible Sphinx version error."""
category = 'Sphinx version error'
| VersionRequirementError |
python | pennersr__django-allauth | allauth/mfa/base/views.py | {
"start": 5818,
"end": 6494
} | class ____(FormView):
form_class = Form
template_name = "mfa/trust." + account_settings.TEMPLATE_EXTENSION
def form_valid(self, form):
do_trust = self.request.POST.get("action") == "trust"
stage = self.request._login_stage
response = stage.exit()
if do_trust:
trust_.trust_browser(self.request, stage.login.user, response)
return response
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
now = timezone.now()
ret["trust_from"] = now
ret["trust_until"] = now + app_settings.TRUST_COOKIE_AGE
return ret
trust = TrustView.as_view()
| TrustView |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 33391,
"end": 34224
} | class ____(IntegrationMixin, CreateView):
success_message = _("Integration created")
def form_valid(self, form):
self.object = form.save()
if self.object.has_sync:
attach_webhook(
project_pk=self.get_project().pk,
integration=self.object,
# TODO: Remove user_pk on the next release,
# it's used just to keep backward compatibility with the old task signature.
user_pk=None,
)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse(
"projects_integrations_detail",
kwargs={
"project_slug": self.get_project().slug,
"integration_pk": self.object.id,
},
)
| IntegrationCreate |
python | django__django | django/db/migrations/state.py | {
"start": 30279,
"end": 41978
} | class ____:
"""
Represent a Django Model. Don't use the actual Model class as it's not
designed to have its options changed - instead, mutate this one and then
render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(
self, app_label, name, fields, options=None, bases=None, managers=None
):
self.app_label = app_label
self.name = name
self.fields = dict(fields)
self.options = options or {}
self.options.setdefault("indexes", [])
self.options.setdefault("constraints", [])
self.bases = bases or (models.Model,)
self.managers = managers or []
for name, field in self.fields.items():
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, "model"):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Ensure that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, "_meta"):
raise ValueError(
'Model fields in "ModelState.fields" cannot refer to a model class '
f'- "{self.app_label}.{self.name}.{name}.to" does. Use a string '
"reference instead."
)
if field.many_to_many and hasattr(field.remote_field.through, "_meta"):
raise ValueError(
'Model fields in "ModelState.fields" cannot refer to a model class '
f'- "{self.app_label}.{self.name}.{name}.through" does. Use a '
"string reference instead."
)
# Sanity-check that indexes have their name set.
for index in self.options["indexes"]:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
def get_field(self, field_name):
if (
field_name == "_order"
and self.options.get("order_with_respect_to") is not None
):
field_name = self.options["order_with_respect_to"]
return self.fields[field_name]
@classmethod
def from_model(cls, model, exclude_rels=False):
"""Given a model, return a ModelState representing it."""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, models.OrderWrt):
continue
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError(
"Couldn't reconstruct field %s on %s: %s"
% (
name,
model._meta.label,
e,
)
)
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError(
"Couldn't reconstruct m2m field %s on %s: %s"
% (
name,
model._meta.object_name,
e,
)
)
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "indexes":
indexes = [idx.clone() for idx in model._meta.indexes]
for index in indexes:
if not index.name:
index.set_name_with_model(model)
options["indexes"] = indexes
elif name == "constraints":
options["constraints"] = [
con.clone() for con in model._meta.constraints
]
else:
options[name] = model._meta.original_attrs[name]
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get("order_with_respect_to") in {
field.name for field in model._meta.private_fields
}:
del options["order_with_respect_to"]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(
set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)
)
# Make our record
bases = tuple(
(base._meta.label_lower if hasattr(base, "_meta") else base)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any(
(isinstance(base, str) or issubclass(base, models.Model)) for base in bases
):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
if manager.name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager.name)
managers.append((manager.name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [("objects", default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
def construct_managers(self):
"""Deep-clone the managers using deconstruction."""
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"""Return an exact copy of this ModelState."""
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=dict(self.fields),
# Since options are shallow-copied here, operations such as
# AddIndex must replace their option (e.g 'indexes') rather
# than mutating it.
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"""Create a Model object from our current state into the given apps."""
# First, make a Meta object
meta_options = {**self.options}
# Prune index_together from options as it's no longer an allowed meta
# attribute.
meta_options.pop("index_together", None)
meta_contents = {"app_label": self.app_label, "apps": apps, **meta_options}
meta = type("Meta", (), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, str) else base)
for base in self.bases
if base != typing.Generic
)
except LookupError:
raise InvalidBasesError(
"Cannot resolve one or more bases from %r" % (self.bases,)
)
# Clone fields for the body, add other bits.
body = {name: field.clone() for name, field in self.fields.items()}
body["Meta"] = meta
body["__module__"] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(self.name, bases, body)
def get_index_by_name(self, name):
for index in self.options["indexes"]:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def get_constraint_by_name(self, name):
for constraint in self.options["constraints"]:
if constraint.name == name:
return constraint
raise ValueError("No constraint named %s on model %s" % (name, self.name))
def __repr__(self):
return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label)
and (self.name == other.name)
and (len(self.fields) == len(other.fields))
and all(
k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:]
for (k1, f1), (k2, f2) in zip(
sorted(self.fields.items()),
sorted(other.fields.items()),
)
)
and (self.options == other.options)
and (self.bases == other.bases)
and (self.managers == other.managers)
)
| ModelState |
python | rushter__MLAlgorithms | mla/neuralnet/regularizers.py | {
"start": 601,
"end": 795
} | class ____(Regularizer):
"""Linear combination of L1 and L2 penalties."""
def _penalty(self, weights):
return 0.5 * self.C * weights**2 + (1.0 - self.C) * np.abs(weights)
| ElasticNet |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_standalone.py | {
"start": 14669,
"end": 17286
} | class ____:
def test_with_target_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['target_id'] == "foo"
def test_without_target_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot)
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['target_id'] is None
def test_doc_json(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
expected = next(iter(standalone_docs_json([test_plot]).values()))
assert out['doc'] == expected
def test_doc_title(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['title'] == ""
def test_root_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['roots'][0]["id"] == out['root_id']
def test_version(self, monkeypatch: pytest.MonkeyPatch, test_plot: figure) -> None:
from bokeh import __version__
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['version'] == __version__
out = bes.json_item(test_plot)
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['version'] == __version__
def test_json_dumps(self, test_plot: figure) -> None:
doc_json = bes.json_item(test_plot)
assert isinstance(json.dumps(doc_json), str)
@patch('bokeh.embed.standalone.OutputDocumentFor')
def test_apply_theme(self, mock_OFD: MagicMock, test_plot: figure) -> None:
# the subsequent call inside ODF will fail since the model was never
# added to a document. Ignoring that since we just want to make sure
# ODF is called with the expected theme arg.
theme = Theme(json={})
try:
bes.json_item(test_plot, theme=theme)
except ValueError:
pass
mock_OFD.assert_called_once_with([test_plot], apply_theme=theme)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| Test_json_item |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 43750,
"end": 44230
} | class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoConfig):
super().__init__()
self.layers = nn.ModuleList(
[Sam3TrackerVideoMemoryFuserCXBlock(config) for _ in range(config.memory_fuser_num_layers)]
)
def forward(self, hidden_states):
# normally hidden_states: (N, C, H, W)
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
| Sam3TrackerVideoMemoryFuser |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_arithmatex.py | {
"start": 1088,
"end": 3762
} | class ____(util.MdCase):
"""Test escaping cases for Arithmatex blocks."""
extension = [
'pymdownx.arithmatex'
]
extension_configs = {}
def test_escaped_dollar_block(self):
"""Test escaping a dollar."""
self.check_markdown(
r'''
$$3+2\$$
''',
r'''
<p>$<span class="arithmatex"><span class="MathJax_Preview">3+2\$</span><script type="math/tex">3+2\$</script></span></p>
''', # noqa: E501
True
)
def test_escaped_dollar_dollar_block(self):
"""Test escaping both dollars."""
self.check_markdown(
r'''
$$3+2\$\$
''',
r'''
<p>$$3+2$$</p>
''',
True
)
def test_double_escaped_dollar_block(self):
"""Test double escaping a dollar."""
self.check_markdown(
r'''
$$3+2\\$$
''',
r'''
<div class="arithmatex">
<div class="MathJax_Preview">3+2\\</div>
<script type="math/tex; mode=display">3+2\\</script>
</div>
''',
True
)
def test_escaped_end_block(self):
"""Test escaping an end."""
self.check_markdown(
r'''
\begin{align}3+2\\end{align}
''',
r'''
<p>\begin{align}3+2\end{align}</p>
''',
True
)
def test_double_escaped_end_block(self):
"""Test double escaping an end."""
self.check_markdown(
r'''
\begin{align}3+2\\\end{align}
''',
r'''
<div class="arithmatex">
<div class="MathJax_Preview">\begin{align}3+2\\\end{align}</div>
<script type="math/tex; mode=display">\begin{align}3+2\\\end{align}</script>
</div>
''',
True
)
def test_escaped_bracket_block(self):
"""Test escaping a bracket."""
self.check_markdown(
r'''
\[3+2\\]
''',
r'''
<p>[3+2\]</p>
''',
True
)
def test_double_escaped_bracket_block(self):
"""Test double escaping a bracket."""
self.check_markdown(
r'''
\[3+2\\\]
''',
r'''
<div class="arithmatex">
<div class="MathJax_Preview">3+2\\</div>
<script type="math/tex; mode=display">3+2\\</script>
</div>
''',
True
)
| TestArithmatexBlockEscapes |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 5983,
"end": 6304
} | class ____:
vt_id: int
# Two different source can point to the same object. However, Dynamo handles
# globals and local source differently when it comes to guards and possibly
# some other parts as well. So, cache also relies on the source.
source: Source
@dataclass(frozen=True)
| VariableTrackerCacheKey |
python | miyuchina__mistletoe | mistletoe/markdown_renderer.py | {
"start": 1747,
"end": 2085
} | class ____:
"""
Markdown fragment. Used when rendering trees of span tokens into flat sequences.
May carry additional data in addition to the text.
Attributes:
text (str): markdown fragment.
"""
def __init__(self, text: str, **extras):
self.text = text
self.__dict__.update(extras)
| Fragment |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 379841,
"end": 380034
} | class ____(TryFinallyStatNode):
"""
A try/finally statement that may be used in nogil code sections.
"""
preserve_exception = False
nogil_check = None
| NogilTryFinallyStatNode |
python | cherrypy__cherrypy | cherrypy/process/plugins.py | {
"start": 25887,
"end": 28168
} | class ____(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each
thread). This will register/unregister the current thread and
publish to 'start_thread' and 'stop_thread' listeners in the bus as
needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether the
thread will be re-used or not. The bus will call 'stop_thread'
listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
"""Initialize the thread manager plugin."""
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = _thread.get_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = _thread.get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
| ThreadManager |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/spark_google_cloud_storage_datasource.py | {
"start": 1174,
"end": 7188
} | class ____(_SparkFilePathDatasource):
"""
SparkGoogleCloudStorageDatasource is a subclass of SparkDatasource which connects to
Google Cloud Storage.
"""
# class attributes
data_connector_type: ClassVar[Type[GoogleCloudStorageDataConnector]] = (
GoogleCloudStorageDataConnector
)
# these fields should not be passed to the execution engine
_EXTRA_EXCLUDED_EXEC_ENG_ARGS: ClassVar[set] = {
"bucket_or_name",
"gcs_options",
"max_results",
}
# instance attributes
type: Literal["spark_gcs"] = "spark_gcs"
# Google Cloud Storage specific attributes
bucket_or_name: str
gcs_options: Dict[str, Union[ConfigStr, Any]] = {}
# on 3.11 the annotation must be type-checking import otherwise it will fail at import time
_gcs_client: Union[Client, None] = pydantic.PrivateAttr(default=None)
def _get_gcs_client(self) -> google.Client:
gcs_client: Union[google.Client, None] = self._gcs_client
if not gcs_client:
# Validate that "google" libararies were successfully imported and attempt to create "gcs_client" handle. # noqa: E501 # FIXME CoP
if google.service_account and google.storage:
try:
credentials: Union[google.Client, None] = (
None # If configured with gcloud CLI / env vars
)
_check_config_substitutions_needed(
self,
self.gcs_options,
raise_warning_if_provider_not_present=True,
)
# pull in needed config substitutions using the `_config_provider`
# The `FluentBaseModel.dict()` call will do the config substitution on the serialized dict if a `config_provider` is passed # noqa: E501 # FIXME CoP
gcs_options: dict = self.dict(config_provider=self._config_provider).get(
"gcs_options", {}
)
if "filename" in gcs_options:
filename: str = gcs_options.pop("filename")
credentials = google.service_account.Credentials.from_service_account_file(
filename=filename
)
elif "info" in gcs_options:
info: Any = gcs_options.pop("info")
credentials = google.service_account.Credentials.from_service_account_info(
info=info
)
gcs_client = google.storage.Client(credentials=credentials, **gcs_options)
except Exception as e:
# Failure to create "gcs_client" is most likely due invalid "gcs_options" dictionary. # noqa: E501 # FIXME CoP
raise SparkGoogleCloudStorageDatasourceError( # noqa: TRY003 # FIXME CoP
f'Due to exception: "{e!r}", "gcs_client" could not be created.'
) from e
else:
raise SparkGoogleCloudStorageDatasourceError( # noqa: TRY003 # FIXME CoP
'Unable to create "SparkGoogleCloudStorageDatasource" due to missing google dependency.' # noqa: E501 # FIXME CoP
)
self._gcs_client = gcs_client
return gcs_client
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the SparkGoogleCloudStorageDatasource.
Args:
test_assets: If assets have been passed to the SparkGoogleCloudStorageDatasource, whether to test them as well.
Raises:
TestConnectionError: If the connection test fails.
""" # noqa: E501 # FIXME CoP
try:
# tests GCS connection
_ = self._get_gcs_client()
except Exception as e:
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
f"Attempt to connect to datasource failed with the following error message: {e!s}"
) from e
# tests Spark connection, raising TestConnectionError
super().test_connection()
if self.assets and test_assets:
for asset in self.assets:
asset.test_connection()
@override
def _build_data_connector(
self,
data_asset: SPARK_PATH_ASSET_UNION,
gcs_prefix: str = "",
gcs_delimiter: str = "/",
gcs_max_results: int = 1000,
gcs_recursive_file_discovery: bool = False,
**kwargs,
) -> None:
"""Builds and attaches the `GoogleCloudStorageDataConnector` to the asset."""
if kwargs:
raise TypeError( # noqa: TRY003 # FIXME CoP
f"_build_data_connector() got unexpected keyword arguments {list(kwargs.keys())}"
)
data_asset._data_connector = self.data_connector_type.build_data_connector(
datasource_name=self.name,
data_asset_name=data_asset.name,
gcs_client=self._get_gcs_client(),
bucket_or_name=self.bucket_or_name,
prefix=gcs_prefix,
delimiter=gcs_delimiter,
max_results=gcs_max_results,
recursive_file_discovery=gcs_recursive_file_discovery,
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
whole_directory_path_override=data_asset.get_whole_directory_path_override(),
)
# build a more specific `_test_connection_error_message`
data_asset._test_connection_error_message = (
self.data_connector_type.build_test_connection_error_message(
data_asset_name=data_asset.name,
bucket_or_name=self.bucket_or_name,
prefix=gcs_prefix,
delimiter=gcs_delimiter,
recursive_file_discovery=gcs_recursive_file_discovery,
)
)
| SparkGoogleCloudStorageDatasource |
python | fsspec__filesystem_spec | fsspec/caching.py | {
"start": 9816,
"end": 11441
} | class ____(BaseCache):
"""Caches the first block of a file only
This may be useful for file types where the metadata is stored in the header,
but is randomly accessed.
"""
name = "first"
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
if blocksize > size:
# this will buffer the whole thing
blocksize = size
super().__init__(blocksize, fetcher, size)
self.cache: bytes | None = None
def _fetch(self, start: int | None, end: int | None) -> bytes:
start = start or 0
if start > self.size:
logger.debug("FirstChunkCache: requested start > file size")
return b""
end = min(end, self.size)
if start < self.blocksize:
if self.cache is None:
self.miss_count += 1
if end > self.blocksize:
self.total_requested_bytes += end
data = self.fetcher(0, end)
self.cache = data[: self.blocksize]
return data[start:]
self.cache = self.fetcher(0, self.blocksize)
self.total_requested_bytes += self.blocksize
part = self.cache[start:end]
if end > self.blocksize:
self.total_requested_bytes += end - self.blocksize
part += self.fetcher(self.blocksize, end)
self.hit_count += 1
return part
else:
self.miss_count += 1
self.total_requested_bytes += end - start
return self.fetcher(start, end)
| FirstChunkCache |
python | getsentry__sentry | src/sentry/codecov/endpoints/test_results_aggregates/serializers.py | {
"start": 113,
"end": 1900
} | class ____(serializers.Serializer):
"""
Serializer for test results aggregates response
"""
__test__ = False
totalDuration = serializers.FloatField()
totalDurationPercentChange = serializers.FloatField()
slowestTestsDuration = serializers.FloatField()
slowestTestsDurationPercentChange = serializers.FloatField()
totalSlowTests = serializers.IntegerField()
totalSlowTestsPercentChange = serializers.FloatField()
totalFails = serializers.IntegerField()
totalFailsPercentChange = serializers.FloatField()
totalSkips = serializers.IntegerField()
totalSkipsPercentChange = serializers.FloatField()
flakeCount = serializers.IntegerField()
flakeCountPercentChange = serializers.FloatField()
flakeRate = serializers.FloatField()
flakeRatePercentChange = serializers.FloatField()
def to_representation(self, instance):
"""
Transform the GraphQL response to the serialized format
"""
try:
test_analytics = instance["data"]["owner"]["repository"]["testAnalytics"]
response_data = test_analytics["testResultsAggregates"]
response_data.update(test_analytics["flakeAggregates"])
return super().to_representation(response_data)
except (KeyError, TypeError) as e:
sentry_sdk.capture_exception(e)
logger.exception(
"Error parsing GraphQL response",
extra={
"error": str(e),
"endpoint": "test-results-aggregates",
"response_keys": (
list(instance.keys()) if isinstance(instance, dict) else None
),
},
)
raise
| TestResultAggregatesSerializer |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_iso_country.py | {
"start": 1653,
"end": 4033
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid country codes according to ISO 3166."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_country": [
"US",
"us",
"USA",
"United States",
"840",
"United States of America",
],
"malformed_country": [
"",
"42",
"country",
"ZZ",
"United State",
"This is not a country",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_country"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_country"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_iso_country"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["pycountry"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIsoCountry().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidIsoCountry |
python | ray-project__ray | rllib/algorithms/sac/sac.py | {
"start": 1208,
"end": 26844
} | class ____(AlgorithmConfig):
"""Defines a configuration class from which an SAC Algorithm can be built.
.. testcode::
config = (
SACConfig()
.environment("Pendulum-v1")
.env_runners(num_env_runners=1)
.training(
gamma=0.9,
actor_lr=0.001,
critic_lr=0.002,
train_batch_size_per_learner=32,
)
)
# Build the SAC algo object from the config and run 1 training iteration.
algo = config.build()
algo.train()
"""
def __init__(self, algo_class=None):
self.exploration_config = {
# The Exploration class to use. In the simplest case, this is the name
# (str) of any class present in the `rllib.utils.exploration` package.
# You can also provide the python class directly or the full location
# of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": "StochasticSampling",
# Add constructor kwargs here (if any).
}
super().__init__(algo_class=algo_class or SAC)
# fmt: off
# __sphinx_doc_begin__
# SAC-specific config settings.
# `.training()`
self.twin_q = True
self.q_model_config = {
"fcnet_hiddens": [256, 256],
"fcnet_activation": "relu",
"post_fcnet_hiddens": [],
"post_fcnet_activation": None,
"custom_model": None, # Use this to define custom Q-model(s).
"custom_model_config": {},
}
self.policy_model_config = {
"fcnet_hiddens": [256, 256],
"fcnet_activation": "relu",
"post_fcnet_hiddens": [],
"post_fcnet_activation": None,
"custom_model": None, # Use this to define a custom policy model.
"custom_model_config": {},
}
self.clip_actions = False
self.tau = 5e-3
self.initial_alpha = 1.0
self.target_entropy = "auto"
self.n_step = 1
# Replay buffer configuration.
self.replay_buffer_config = {
"type": "PrioritizedEpisodeReplayBuffer",
# Size of the replay buffer. Note that if async_updates is set,
# then each worker will have a replay buffer of this size.
"capacity": int(1e6),
"alpha": 0.6,
# Beta parameter for sampling from prioritized replay buffer.
"beta": 0.4,
}
self.store_buffer_in_checkpoints = False
self.training_intensity = None
self.optimization = {
"actor_learning_rate": 3e-4,
"critic_learning_rate": 3e-4,
"entropy_learning_rate": 3e-4,
}
self.actor_lr = 3e-5
self.critic_lr = 3e-4
self.alpha_lr = 3e-4
# Set `lr` parameter to `None` and ensure it is not used.
self.lr = None
self.grad_clip = None
self.target_network_update_freq = 0
# .env_runners()
# Set to `self.n_step`, if 'auto'.
self.rollout_fragment_length = "auto"
# .training()
self.train_batch_size_per_learner = 256
self.train_batch_size = 256 # @OldAPIstack
self.num_steps_sampled_before_learning_starts = 1500
# .reporting()
self.min_time_s_per_iteration = 1
self.min_sample_timesteps_per_iteration = 100
# __sphinx_doc_end__
# fmt: on
self._deterministic_loss = False
self._use_beta_distribution = False
self.use_state_preprocessor = DEPRECATED_VALUE
self.worker_side_prioritization = DEPRECATED_VALUE
@override(AlgorithmConfig)
def training(
self,
*,
twin_q: Optional[bool] = NotProvided,
q_model_config: Optional[Dict[str, Any]] = NotProvided,
policy_model_config: Optional[Dict[str, Any]] = NotProvided,
tau: Optional[float] = NotProvided,
initial_alpha: Optional[float] = NotProvided,
target_entropy: Optional[Union[str, float]] = NotProvided,
n_step: Optional[Union[int, Tuple[int, int]]] = NotProvided,
store_buffer_in_checkpoints: Optional[bool] = NotProvided,
replay_buffer_config: Optional[Dict[str, Any]] = NotProvided,
training_intensity: Optional[float] = NotProvided,
clip_actions: Optional[bool] = NotProvided,
grad_clip: Optional[float] = NotProvided,
optimization_config: Optional[Dict[str, Any]] = NotProvided,
actor_lr: Optional[LearningRateOrSchedule] = NotProvided,
critic_lr: Optional[LearningRateOrSchedule] = NotProvided,
alpha_lr: Optional[LearningRateOrSchedule] = NotProvided,
target_network_update_freq: Optional[int] = NotProvided,
_deterministic_loss: Optional[bool] = NotProvided,
_use_beta_distribution: Optional[bool] = NotProvided,
num_steps_sampled_before_learning_starts: Optional[int] = NotProvided,
**kwargs,
) -> Self:
"""Sets the training related configuration.
Args:
twin_q: Use two Q-networks (instead of one) for action-value estimation.
Note: Each Q-network will have its own target network.
q_model_config: Model configs for the Q network(s). These will override
MODEL_DEFAULTS. This is treated just as the top-level `model` dict in
setting up the Q-network(s) (2 if twin_q=True).
That means, you can do for different observation spaces:
`obs=Box(1D)` -> `Tuple(Box(1D) + Action)` -> `concat` -> `post_fcnet`
obs=Box(3D) -> Tuple(Box(3D) + Action) -> vision-net -> concat w/ action
-> post_fcnet
obs=Tuple(Box(1D), Box(3D)) -> Tuple(Box(1D), Box(3D), Action)
-> vision-net -> concat w/ Box(1D) and action -> post_fcnet
You can also have SAC use your custom_model as Q-model(s), by simply
specifying the `custom_model` sub-key in below dict (just like you would
do in the top-level `model` dict.
policy_model_config: Model options for the policy function (see
`q_model_config` above for details). The difference to `q_model_config`
above is that no action concat'ing is performed before the post_fcnet
stack.
tau: Update the target by \tau * policy + (1-\tau) * target_policy.
initial_alpha: Initial value to use for the entropy weight alpha.
target_entropy: Target entropy lower bound. If "auto", will be set
to `-|A|` (e.g. -2.0 for Discrete(2), -3.0 for Box(shape=(3,))).
This is the inverse of reward scale, and will be optimized
automatically.
n_step: N-step target updates. If >1, sars' tuples in trajectories will be
postprocessed to become sa[discounted sum of R][s t+n] tuples. An
integer will be interpreted as a fixed n-step value. If a tuple of 2
ints is provided here, the n-step value will be drawn for each sample(!)
in the train batch from a uniform distribution over the closed interval
defined by `[n_step[0], n_step[1]]`.
store_buffer_in_checkpoints: Set this to True, if you want the contents of
your buffer(s) to be stored in any saved checkpoints as well.
Warnings will be created if:
- This is True AND restoring from a checkpoint that contains no buffer
data.
- This is False AND restoring from a checkpoint that does contain
buffer data.
replay_buffer_config: Replay buffer config.
Examples:
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentReplayBuffer",
"capacity": 50000,
"replay_batch_size": 32,
"replay_sequence_length": 1,
}
- OR -
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": 50000,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
"replay_sequence_length": 1,
}
- Where -
prioritized_replay_alpha: Alpha parameter controls the degree of
prioritization in the buffer. In other words, when a buffer sample has
a higher temporal-difference error, with how much more probability
should it drawn to use to update the parametrized Q-network. 0.0
corresponds to uniform probability. Setting much above 1.0 may quickly
result as the sampling distribution could become heavily “pointy” with
low entropy.
prioritized_replay_beta: Beta parameter controls the degree of
importance sampling which suppresses the influence of gradient updates
from samples that have higher probability of being sampled via alpha
parameter and the temporal-difference error.
prioritized_replay_eps: Epsilon parameter sets the baseline probability
for sampling so that when the temporal-difference error of a sample is
zero, there is still a chance of drawing the sample.
training_intensity: The intensity with which to update the model (vs
collecting samples from the env).
If None, uses "natural" values of:
`train_batch_size` / (`rollout_fragment_length` x `num_env_runners` x
`num_envs_per_env_runner`).
If not None, will make sure that the ratio between timesteps inserted
into and sampled from the buffer matches the given values.
Example:
training_intensity=1000.0
train_batch_size=250
rollout_fragment_length=1
num_env_runners=1 (or 0)
num_envs_per_env_runner=1
-> natural value = 250 / 1 = 250.0
-> will make sure that replay+train op will be executed 4x as often as
rollout+insert op (4 * 250 = 1000).
See: rllib/algorithms/dqn/dqn.py::calculate_rr_weights for further
details.
clip_actions: Whether to clip actions. If actions are already normalized,
this should be set to False.
grad_clip: If not None, clip gradients during optimization at this value.
optimization_config: Config dict for optimization. Set the supported keys
`actor_learning_rate`, `critic_learning_rate`, and
`entropy_learning_rate` in here.
actor_lr: The learning rate (float) or learning rate schedule for the
policy in the format of
[[timestep, lr-value], [timestep, lr-value], ...] In case of a
schedule, intermediary timesteps will be assigned to linearly
interpolated learning rate values. A schedule config's first entry
must start with timestep 0, i.e.: [[0, initial_value], [...]].
Note: It is common practice (two-timescale approach) to use a smaller
learning rate for the policy than for the critic to ensure that the
critic gives adequate values for improving the policy.
Note: If you require a) more than one optimizer (per RLModule),
b) optimizer types that are not Adam, c) a learning rate schedule that
is not a linearly interpolated, piecewise schedule as described above,
or d) specifying c'tor arguments of the optimizer that are not the
learning rate (e.g. Adam's epsilon), then you must override your
Learner's `configure_optimizer_for_module()` method and handle
lr-scheduling yourself.
The default value is 3e-5, one decimal less than the respective
learning rate of the critic (see `critic_lr`).
critic_lr: The learning rate (float) or learning rate schedule for the
critic in the format of
[[timestep, lr-value], [timestep, lr-value], ...] In case of a
schedule, intermediary timesteps will be assigned to linearly
interpolated learning rate values. A schedule config's first entry
must start with timestep 0, i.e.: [[0, initial_value], [...]].
Note: It is common practice (two-timescale approach) to use a smaller
learning rate for the policy than for the critic to ensure that the
critic gives adequate values for improving the policy.
Note: If you require a) more than one optimizer (per RLModule),
b) optimizer types that are not Adam, c) a learning rate schedule that
is not a linearly interpolated, piecewise schedule as described above,
or d) specifying c'tor arguments of the optimizer that are not the
learning rate (e.g. Adam's epsilon), then you must override your
Learner's `configure_optimizer_for_module()` method and handle
lr-scheduling yourself.
The default value is 3e-4, one decimal higher than the respective
learning rate of the actor (policy) (see `actor_lr`).
alpha_lr: The learning rate (float) or learning rate schedule for the
hyperparameter alpha in the format of
[[timestep, lr-value], [timestep, lr-value], ...] In case of a
schedule, intermediary timesteps will be assigned to linearly
interpolated learning rate values. A schedule config's first entry
must start with timestep 0, i.e.: [[0, initial_value], [...]].
Note: If you require a) more than one optimizer (per RLModule),
b) optimizer types that are not Adam, c) a learning rate schedule that
is not a linearly interpolated, piecewise schedule as described above,
or d) specifying c'tor arguments of the optimizer that are not the
learning rate (e.g. Adam's epsilon), then you must override your
Learner's `configure_optimizer_for_module()` method and handle
lr-scheduling yourself.
The default value is 3e-4, identical to the critic learning rate (`lr`).
target_network_update_freq: Update the target network every
`target_network_update_freq` steps.
num_steps_sampled_before_learning_starts: Number of timesteps (int)
that we collect from the runners before we start sampling the
replay buffers for learning. Whether we count this in agent steps
or environment steps depends on the value of
`config.multi_agent(count_steps_by=...)`.
_deterministic_loss: Whether the loss should be calculated deterministically
(w/o the stochastic action sampling step). True only useful for
continuous actions and for debugging.
_use_beta_distribution: Use a Beta-distribution instead of a
`SquashedGaussian` for bounded, continuous action spaces (not
recommended; for debugging only).
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if twin_q is not NotProvided:
self.twin_q = twin_q
if q_model_config is not NotProvided:
self.q_model_config.update(q_model_config)
if policy_model_config is not NotProvided:
self.policy_model_config.update(policy_model_config)
if tau is not NotProvided:
self.tau = tau
if initial_alpha is not NotProvided:
self.initial_alpha = initial_alpha
if target_entropy is not NotProvided:
self.target_entropy = target_entropy
if n_step is not NotProvided:
self.n_step = n_step
if store_buffer_in_checkpoints is not NotProvided:
self.store_buffer_in_checkpoints = store_buffer_in_checkpoints
if replay_buffer_config is not NotProvided:
# Override entire `replay_buffer_config` if `type` key changes.
# Update, if `type` key remains the same or is not specified.
new_replay_buffer_config = deep_update(
{"replay_buffer_config": self.replay_buffer_config},
{"replay_buffer_config": replay_buffer_config},
False,
["replay_buffer_config"],
["replay_buffer_config"],
)
self.replay_buffer_config = new_replay_buffer_config["replay_buffer_config"]
if training_intensity is not NotProvided:
self.training_intensity = training_intensity
if clip_actions is not NotProvided:
self.clip_actions = clip_actions
if grad_clip is not NotProvided:
self.grad_clip = grad_clip
if optimization_config is not NotProvided:
self.optimization = optimization_config
if actor_lr is not NotProvided:
self.actor_lr = actor_lr
if critic_lr is not NotProvided:
self.critic_lr = critic_lr
if alpha_lr is not NotProvided:
self.alpha_lr = alpha_lr
if target_network_update_freq is not NotProvided:
self.target_network_update_freq = target_network_update_freq
if _deterministic_loss is not NotProvided:
self._deterministic_loss = _deterministic_loss
if _use_beta_distribution is not NotProvided:
self._use_beta_distribution = _use_beta_distribution
if num_steps_sampled_before_learning_starts is not NotProvided:
self.num_steps_sampled_before_learning_starts = (
num_steps_sampled_before_learning_starts
)
return self
@override(AlgorithmConfig)
def validate(self) -> None:
    """Validate this SAC config for internal consistency.

    Checks, in order: `rollout_fragment_length` vs. `n_step` compatibility,
    the deprecated `use_state_preprocessor` flag, `grad_clip` positivity,
    availability of `tensorflow_probability` for tf frameworks, replay-buffer
    type vs. API-stack compatibility, and that the generic `lr` is unset on
    the new API stack (SAC uses `actor_lr`/`critic_lr`/`alpha_lr` instead).

    Raises:
        ValueError: If any of the above consistency checks fails.
    """
    # Call super's validation method.
    super().validate()
    # Check rollout_fragment_length to be compatible with n_step.
    # For a ranged (tuple) n_step, the upper bound determines the minimum
    # number of timesteps a rollout must contain to form one n-step tuple.
    if isinstance(self.n_step, tuple):
        min_rollout_fragment_length = self.n_step[1]
    else:
        min_rollout_fragment_length = self.n_step
    if (
        not self.in_evaluation
        and self.rollout_fragment_length != "auto"
        and self.rollout_fragment_length
        < min_rollout_fragment_length  # (self.n_step or 1)
    ):
        raise ValueError(
            f"Your `rollout_fragment_length` ({self.rollout_fragment_length}) is "
            f"smaller than needed for `n_step` ({self.n_step})! If `n_step` is "
            f"an integer try setting `rollout_fragment_length={self.n_step}`. If "
            "`n_step` is a tuple, try setting "
            f"`rollout_fragment_length={self.n_step[1]}`."
        )
    # `use_state_preprocessor` is deprecated: warn (non-fatal) and reset the
    # attribute back to the deprecation marker.
    if self.use_state_preprocessor != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['use_state_preprocessor']",
            error=False,
        )
        self.use_state_preprocessor = DEPRECATED_VALUE
    if self.grad_clip is not None and self.grad_clip <= 0.0:
        raise ValueError("`grad_clip` value must be > 0.0!")
    # The tf implementations of SAC require tensorflow_probability; surface a
    # clear install hint, then let `try_import_tfp(error=True)` raise.
    if self.framework in ["tf", "tf2"] and tfp is None:
        logger.warning(
            "You need `tensorflow_probability` in order to run SAC! "
            "Install it via `pip install tensorflow_probability`. Your "
            f"tf.__version__={tf.__version__ if tf else None}."
            "Trying to import tfp results in the following error:"
        )
        try_import_tfp(error=True)
    # Validate that we use the corresponding `EpisodeReplayBuffer` when using
    # episodes.
    if (
        self.enable_env_runner_and_connector_v2
        and self.replay_buffer_config["type"]
        not in [
            "EpisodeReplayBuffer",
            "PrioritizedEpisodeReplayBuffer",
            "MultiAgentEpisodeReplayBuffer",
            "MultiAgentPrioritizedEpisodeReplayBuffer",
        ]
        and not (
            # TODO (simon): Set up an indicator `is_offline_new_stack` that
            # includes all these variable checks.
            # NOTE(review): this exemption appears to cover offline-data runs
            # (string or list-of-strings `input_` other than "sampler").
            self.input_
            and (
                isinstance(self.input_, str)
                or (
                    isinstance(self.input_, list)
                    and isinstance(self.input_[0], str)
                )
            )
            and self.input_ != "sampler"
            and self.enable_rl_module_and_learner
        )
    ):
        raise ValueError(
            "When using the new `EnvRunner API` the replay buffer must be of type "
            "`EpisodeReplayBuffer`."
        )
    # Conversely, the old API stack must NOT use an episode-based buffer
    # (matched either by class or by "Episode" appearing in the type string).
    elif not self.enable_env_runner_and_connector_v2 and (
        (
            isinstance(self.replay_buffer_config["type"], str)
            and "Episode" in self.replay_buffer_config["type"]
        )
        or (
            isinstance(self.replay_buffer_config["type"], type)
            and issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer)
        )
    ):
        raise ValueError(
            "When using the old API stack the replay buffer must not be of type "
            "`EpisodeReplayBuffer`! We suggest you use the following config to run "
            "SAC on the old API stack: `config.training(replay_buffer_config={"
            "'type': 'MultiAgentPrioritizedReplayBuffer', "
            "'prioritized_replay_alpha': [alpha], "
            "'prioritized_replay_beta': [beta], "
            "'prioritized_replay_eps': [eps], "
            "})`."
        )
    if self.enable_rl_module_and_learner:
        # SAC optimizes actor, critic, and alpha with separate learning rates;
        # a set global `lr` would be ambiguous, so reject it.
        if self.lr is not None:
            raise ValueError(
                "Basic learning rate parameter `lr` is not `None`. For SAC "
                "use the specific learning rate parameters `actor_lr`, `critic_lr` "
                "and `alpha_lr`, for the actor, critic, and the hyperparameter "
                "`alpha`, respectively and set `config.lr` to None."
            )
        # Warn about new API stack on by default.
        logger.warning(
            "You are running SAC on the new API stack! This is the new default "
            "behavior for this algorithm. If you don't want to use the new API "
            "stack, set `config.api_stack(enable_rl_module_and_learner=False, "
            "enable_env_runner_and_connector_v2=False)`. For a detailed "
            "migration guide, see here: https://docs.ray.io/en/master/rllib/new-api-stack-migration-guide.html"  # noqa
        )
@override(AlgorithmConfig)
def get_rollout_fragment_length(self, worker_index: int = 0) -> int:
    """Return the effective rollout fragment length.

    When `rollout_fragment_length` is "auto", the value is derived from
    `n_step`: for a ranged (tuple/list) n-step setting, the upper bound is
    used so that any sampled n-step always fits within one fragment.
    """
    if self.rollout_fragment_length != "auto":
        return self.rollout_fragment_length
    n_step_setting = self.n_step
    if isinstance(n_step_setting, (tuple, list)):
        return n_step_setting[1]
    return n_step_setting
@override(AlgorithmConfig)
def get_default_rl_module_spec(self) -> RLModuleSpecType:
    """Return the default RLModule spec for SAC (torch framework only)."""
    # Guard clause: SAC's default RLModule is only implemented for torch.
    if self.framework_str != "torch":
        raise ValueError(
            f"The framework {self.framework_str} is not supported. Use `torch`."
        )
    from ray.rllib.algorithms.sac.torch.default_sac_torch_rl_module import (
        DefaultSACTorchRLModule,
    )

    return RLModuleSpec(module_class=DefaultSACTorchRLModule)
@override(AlgorithmConfig)
def get_default_learner_class(self) -> Union[Type["Learner"], str]:
    """Return the default Learner class for SAC (torch framework only)."""
    # Guard clause: only the torch Learner exists for SAC.
    if self.framework_str != "torch":
        raise ValueError(
            f"The framework {self.framework_str} is not supported. Use `torch`."
        )
    from ray.rllib.algorithms.sac.torch.sac_torch_learner import SACTorchLearner

    return SACTorchLearner
@override(AlgorithmConfig)
def build_learner_connector(
    self,
    input_observation_space,
    input_action_space,
    device=None,
):
    """Build the learner connector pipeline with NEXT_OBS support.

    SAC's targets need the next observation in the train batch, so a piece
    that copies NEXT_OBS from episodes is spliced in right after the default
    piece that adds OBS.
    """
    connector_pipeline = super().build_learner_connector(
        input_observation_space=input_observation_space,
        input_action_space=input_action_space,
        device=device,
    )
    # Splice the NEXT_OBS piece in directly behind the default OBS piece.
    connector_pipeline.insert_after(
        AddObservationsFromEpisodesToBatch,
        AddNextObservationsFromEpisodesToTrainBatch(),
    )
    return connector_pipeline
@property
def _model_config_auto_includes(self):
    """Model-config keys auto-forwarded to the RLModule, plus `twin_q`."""
    base_includes = super()._model_config_auto_includes
    return base_includes | {"twin_q": self.twin_q}
| SACConfig |
python | allegroai__clearml | examples/frameworks/tensorflow/tensorflow_mnist.py | {
"start": 1052,
"end": 4999
} | class ____(Model):
    """Small Keras CNN for MNIST: one conv layer, then two dense layers."""

    def __init__(self):
        super(MyModel, self).__init__()
        # 32 conv filters of size 3x3; dtype kept explicitly float32.
        self.conv1 = Conv2D(32, 3, activation='relu', dtype=tf.float32)
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu', dtype=tf.float32)
        # 10 softmax outputs -- presumably one per MNIST digit class.
        self.d2 = Dense(10, activation='softmax', dtype=tf.float32)

    def call(self, x):
        """Forward pass: conv -> flatten -> dense -> softmax probabilities."""
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
# Create an instance of the model
model = MyModel()
# Choose an optimizer and loss function for training.
# Sparse categorical cross-entropy matches integer (non-one-hot) labels.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# Select metrics to measure the loss and the accuracy of the model.
# These metrics accumulate the values over epochs and then print the overall result.
train_loss = tf.keras.metrics.Mean(name='train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
# Test the model
@tf.function
def test_step(images, labels):
    """Evaluate one batch and accumulate test metrics (no gradients)."""
    preds = model(images)
    batch_loss = loss_object(labels, preds)
    test_loss(batch_loss)
    test_accuracy(labels, preds)
# Set up summary writers to write the summaries to disk in a different logs directory
train_log_dir = os.path.join(gettempdir(), 'logs', 'gradient_tape', 'train')
test_log_dir = os.path.join(gettempdir(), 'logs', 'gradient_tape', 'test')
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
# Set up checkpoints manager (keeps only the 3 most recent checkpoints).
ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=model)
manager = tf.train.CheckpointManager(ckpt, os.path.join(gettempdir(), 'tf_ckpts'), max_to_keep=3)
# Resume from the latest checkpoint if one exists; restore() is a no-op
# when `latest_checkpoint` is None.
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
    print("Restored from {}".format(manager.latest_checkpoint))
else:
    print("Initializing from scratch.")
# Start training
EPOCHS = 5
for epoch in range(EPOCHS):
    for images, labels in train_ds:
        train_step(images, labels)
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', train_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
    ckpt.step.assign_add(1)
    # NOTE: `% 1 == 0` is always true, i.e. a checkpoint is saved every epoch.
    if int(ckpt.step) % 1 == 0:
        save_path = manager.save()
        print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    with test_summary_writer.as_default():
        tf.summary.scalar('loss', test_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch+1,
                          train_loss.result(),
                          train_accuracy.result()*100,
                          test_loss.result(),
                          test_accuracy.result()*100))
    # Reset the metrics for the next epoch.
    # Older TF/Keras versions expose `reset_states()`; newer ones renamed it
    # to `reset_state()`, hence the AttributeError fallback.
    try:
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()
    except AttributeError:
        train_loss.reset_state()
        train_accuracy.reset_state()
        test_loss.reset_state()
        test_accuracy.reset_state()
| MyModel |
python | django__django | tests/model_formsets_regress/tests.py | {
"start": 13000,
"end": 14947
} | class ____(TestCase):
    """
    Regression for #13095 and #17683: Using base forms with widgets
    defined in Meta should not raise errors and BaseModelForm should respect
    the specified pk widget.
    """

    def test_inlineformset_factory_default(self):
        # Widgets/localized_fields declared on UserSiteForm.Meta must carry
        # over to the formset's forms, including the pk ("id") field.
        Formset = inlineformset_factory(
            User, UserSite, form=UserSiteForm, fields="__all__"
        )
        form = Formset().forms[0]
        self.assertIsInstance(form["id"].field.widget, CustomWidget)
        self.assertIsInstance(form["data"].field.widget, CustomWidget)
        self.assertFalse(form.fields["id"].localize)
        self.assertTrue(form.fields["data"].localize)

    def test_modelformset_factory_default(self):
        # Same expectations as above, but via modelformset_factory.
        Formset = modelformset_factory(UserSite, form=UserSiteForm)
        form = Formset().forms[0]
        self.assertIsInstance(form["id"].field.widget, CustomWidget)
        self.assertIsInstance(form["data"].field.widget, CustomWidget)
        self.assertFalse(form.fields["id"].localize)
        self.assertTrue(form.fields["data"].localize)

    def assertCallbackCalled(self, callback):
        """Assert the formfield callback saw every UserSite field, with the
        widget/localize kwargs that come from UserSiteForm.Meta."""
        id_field, user_field, data_field = UserSite._meta.fields
        expected_log = [
            (id_field, {"widget": CustomWidget}),
            (user_field, {}),
            (data_field, {"widget": CustomWidget, "localize": True}),
        ]
        self.assertEqual(callback.log, expected_log)

    def test_inlineformset_custom_callback(self):
        callback = Callback()
        inlineformset_factory(
            User,
            UserSite,
            form=UserSiteForm,
            formfield_callback=callback,
            fields="__all__",
        )
        self.assertCallbackCalled(callback)

    def test_modelformset_custom_callback(self):
        callback = Callback()
        modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback)
        self.assertCallbackCalled(callback)
| FormfieldCallbackTests |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_trace.py | {
"start": 23605,
"end": 55550
} | class ____(OrganizationEventsTraceEndpointBase):
url_name = "sentry-api-0-organization-events-trace"
check_generation = True
def assert_event(self, result, event_data, message):
assert result["transaction"] == event_data.transaction, message
assert result["event_id"] == event_data.event_id
assert result["start_timestamp"] == event_data.data["start_timestamp"]
assert result["profile_id"] == event_data.data["contexts"]["profile"]["profile_id"]
def assert_trace_data(self, root, gen2_no_children=True):
"""see the setUp docstring for an idea of what the response structure looks like"""
self.assert_event(root, self.root_event, "root")
assert root["parent_event_id"] is None
assert root["parent_span_id"] is None
if self.check_generation:
assert root["generation"] == 0
assert root["transaction.duration"] == 3000
assert root["sdk_name"] == "sentry.test.sdk"
assert len(root["children"]) == 3
self.assert_performance_issues(root)
for i, gen1 in enumerate(root["children"]):
self.assert_event(gen1, self.gen1_events[i], f"gen1_{i}")
assert gen1["parent_event_id"] == self.root_event.event_id
assert gen1["parent_span_id"] == self.root_span_ids[i]
if self.check_generation:
assert gen1["generation"] == 1
assert gen1["transaction.duration"] == 2000
assert gen1["sdk_name"] == "sentry.test.sdk"
assert len(gen1["children"]) == 1
gen2 = gen1["children"][0]
self.assert_event(gen2, self.gen2_events[i], f"gen2_{i}")
assert gen2["parent_event_id"] == self.gen1_events[i].event_id
assert gen2["parent_span_id"] == self.gen1_span_ids[i]
if self.check_generation:
assert gen2["generation"] == 2
assert gen2["transaction.duration"] == 1000
assert gen2["sdk_name"] == "sentry.test.sdk"
# Only the first gen2 descendent has a child
if i == 0:
assert len(gen2["children"]) == 1
gen3 = gen2["children"][0]
self.assert_event(gen3, self.gen3_event, f"gen3_{i}")
assert gen3["parent_event_id"] == self.gen2_events[i].event_id
assert gen3["parent_span_id"] == self.gen2_span_id
if self.check_generation:
assert gen3["generation"] == 3
assert gen3["transaction.duration"] == 500
assert gen3["sdk_name"] == "sentry.test.sdk"
assert len(gen3["children"]) == 0
elif gen2_no_children:
assert len(gen2["children"]) == 0
def assert_performance_issues(self, root):
"""Broken in the non-spans endpoint, but we're not maintaining that anymore"""
pass
def client_get(self, data, url=None):
if url is None:
url = self.url
return self.client.get(
url,
data,
format="json",
)
def test_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.login_as(user=user)
url = reverse(
self.url_name,
kwargs={"organization_id_or_slug": org.slug, "trace_id": uuid4().hex},
)
with self.feature(self.FEATURES):
response = self.client.get(
url,
format="json",
)
assert response.status_code == 404, response.content
def test_simple(self) -> None:
self.load_trace()
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
trace_transaction = response.data["transactions"][0]
self.assert_trace_data(trace_transaction)
# We shouldn't have detailed fields here
assert "transaction.status" not in trace_transaction
assert "tags" not in trace_transaction
assert "measurements" not in trace_transaction
def test_simple_with_limit(self) -> None:
self.load_trace()
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1, "limit": 200},
)
assert response.status_code == 200, response.content
trace_transaction = response.data["transactions"][0]
self.assert_trace_data(trace_transaction)
# We shouldn't have detailed fields here
assert "transaction.status" not in trace_transaction
assert "tags" not in trace_transaction
assert "measurements" not in trace_transaction
def test_detailed_trace(self) -> None:
self.load_trace()
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1, "detailed": 1},
)
assert response.status_code == 200, response.content
trace_transaction = response.data["transactions"][0]
self.assert_trace_data(trace_transaction)
root = trace_transaction
assert root["transaction.status"] == "ok"
root_tags = {tag["key"]: tag["value"] for tag in root["tags"]}
for [key, value] in self.root_event.tags:
if not key.startswith("sentry:"):
assert root_tags[key] == value, f"tags - {key}"
else:
assert root_tags[key[7:]] == value, f"tags - {key}"
assert root["measurements"]["lcp"]["value"] == 1000
assert root["measurements"]["fcp"]["value"] == 750
def test_detailed_trace_with_bad_tags(self) -> None:
"""Basically test that we're actually using the event serializer's method for tags"""
trace = uuid4().hex
long_tag_key = "somethinglong" * 250 # 3250 characters
long_tag_value = "somethinglong" * 250 # 3250 characters
self.create_event(
trace_id=trace,
transaction="bad-tags",
parent_span_id=None,
spans=[],
project_id=self.project.id,
tags=[[long_tag_key, long_tag_value]],
milliseconds=3000,
store_event_kwargs={"assert_no_errors": False},
is_eap=True,
)
url = reverse(
self.url_name,
kwargs={"organization_id_or_slug": self.project.organization.slug, "trace_id": trace},
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1, "detailed": 1},
url=url,
)
assert response.status_code == 200, response.content
root = response.data["transactions"][0]
assert root["transaction.status"] == "ok"
# Check that tags are trimmed to 200 characters, not dropped
# Find the tag with the long key/value (it should be trimmed but present)
found_long_tag = None
for tag in root["tags"]:
# Look for a tag that starts with our long key pattern and is around 200 chars
if (
tag["key"]
and tag["key"].startswith("somethinglongsomethinglong")
and len(tag["key"]) <= 200
and tag["value"]
and tag["value"].startswith("somethinglongsomethinglong")
and len(tag["value"]) <= 200
):
found_long_tag = tag
break
assert found_long_tag is not None, f"Expected trimmed tag not found. Tags: {root['tags']}"
# Verify the tag key and value are trimmed to approximately 200 characters
assert (
len(found_long_tag["key"]) <= 200
), f"Tag key too long: {len(found_long_tag['key'])} chars"
assert (
len(found_long_tag["value"]) <= 200
), f"Tag value too long: {len(found_long_tag['value'])} chars"
# Verify they start with the expected pattern (not None)
assert found_long_tag["key"].startswith("somethinglongsomethinglong")
assert found_long_tag["value"].startswith("somethinglongsomethinglong")
def test_bad_span_loop(self) -> None:
"""Maliciously create a loop in the span structure
Structure then becomes something like this:
root
gen1-0...
gen1-1
gen2-1
gen3-1
gen_2-1
gen3-1...
"""
self.load_trace()
gen3_loop_event = self.create_event(
trace_id=self.trace_id,
transaction="/transaction/gen3-1/loop",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET gen2-1",
"span_id": self.gen1_span_ids[1],
"trace_id": self.trace_id,
}
],
parent_span_id=self.gen2_span_ids[1],
project_id=self.project.id,
is_eap=True,
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
# Should be the same as the simple testcase
trace_transaction = response.data["transactions"][0]
self.assert_trace_data(trace_transaction, gen2_no_children=False)
# The difference is that gen3-1 should exist with no children
gen2_1 = trace_transaction["children"][1]["children"][0]
assert len(gen2_1["children"]) == 1
gen3_1 = gen2_1["children"][0]
assert gen3_1["event_id"] == gen3_loop_event.event_id
# We didn't even try to start the loop of spans
assert len(gen3_1["children"]) == 0
def test_bad_orphan_span_loop(self) -> None:
"""Maliciously create a loop in the span structure but for an orphan event"""
root_span_id = uuid4().hex[:16]
root_parent_span = uuid4().hex[:16]
root_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/root/",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET orphan_child",
"span_id": root_span_id,
"trace_id": self.trace_id,
}
],
parent_span_id=root_parent_span,
project_id=self.project.id,
milliseconds=3000,
start_timestamp=self.day_ago - timedelta(minutes=1),
is_eap=True,
)
orphan_child = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/child/",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET orphan_root",
"span_id": root_parent_span,
"trace_id": self.trace_id,
}
],
parent_span_id=root_span_id,
project_id=self.project.id,
milliseconds=300,
is_eap=True,
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["transactions"]) == 1
# There really isn't a right answer to which orphan is the "root" since this loops, but the current
# implementation will make the older event the root
root = response.data["transactions"][0]
self.assert_event(root, root_event, "root")
assert len(root["children"]) == 1
child = root["children"][0]
self.assert_event(child, orphan_child, "child")
def test_multiple_roots(self) -> None:
trace_id = uuid4().hex
first_root = self.create_event(
trace_id=trace_id,
transaction="/first_root",
spans=[],
parent_span_id=None,
project_id=self.project.id,
milliseconds=500,
is_eap=True,
)
second_root = self.create_event(
trace_id=trace_id,
transaction="/second_root",
spans=[],
parent_span_id=None,
project_id=self.project.id,
milliseconds=1000,
is_eap=True,
)
self.url = reverse(
self.url_name,
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"trace_id": trace_id,
},
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["transactions"]) == 2
self.assert_event(response.data["transactions"][0], first_root, "first_root")
self.assert_event(response.data["transactions"][1], second_root, "second_root")
def test_sibling_transactions(self) -> None:
"""More than one transaction can share a parent_span_id"""
self.load_trace()
gen3_event_siblings = [
self.create_event(
trace_id=self.trace_id,
transaction="/transaction/gen3-1",
spans=[],
project_id=self.create_project(organization=self.organization).id,
parent_span_id=self.gen2_span_ids[1],
milliseconds=1000,
is_eap=True,
).event_id,
self.create_event(
trace_id=self.trace_id,
transaction="/transaction/gen3-2",
spans=[],
project_id=self.create_project(organization=self.organization).id,
parent_span_id=self.gen2_span_ids[1],
milliseconds=2000,
is_eap=True,
).event_id,
]
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
# Should be the same as the simple testcase, but skip checking gen2 children
self.assert_trace_data(response.data["transactions"][0], gen2_no_children=False)
gen2_parent = response.data["transactions"][0]["children"][1]["children"][0]
assert len(gen2_parent["children"]) == 2
assert [child["event_id"] for child in gen2_parent["children"]] == gen3_event_siblings
def test_with_orphan_siblings(self) -> None:
self.load_trace()
parent_span_id = uuid4().hex[:16]
root_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/root",
spans=[],
# Some random id so its separated from the rest of the trace
parent_span_id=parent_span_id,
project_id=self.project.id,
# Shorter duration means that this event happened first, and should be ordered first
milliseconds=1000,
is_eap=True,
)
root_sibling_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/root-sibling",
spans=[],
parent_span_id=parent_span_id,
project_id=self.project.id,
milliseconds=2000,
is_eap=True,
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["transactions"]) == 3
# The first item of the response should be the main trace
main, *orphans = response.data["transactions"]
self.assert_trace_data(main)
assert [root_event.event_id, root_sibling_event.event_id] == [
orphan["event_id"] for orphan in orphans
]
def test_with_orphan_trace(self) -> None:
self.load_trace()
orphan_span_ids = {
key: uuid4().hex[:16]
for key in ["root", "root_span", "child", "child_span", "grandchild", "grandchild_span"]
}
# Create the orphan transactions
root_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/root",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET gen1 orphan",
"span_id": orphan_span_ids["root_span"],
"trace_id": self.trace_id,
}
],
# Some random id so its separated from the rest of the trace
parent_span_id=uuid4().hex[:16],
span_id=orphan_span_ids["root"],
project_id=self.project.id,
milliseconds=3000,
start_timestamp=self.day_ago - timedelta(minutes=1),
is_eap=True,
)
child_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/child1-0",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET gen1 orphan",
"span_id": orphan_span_ids["child_span"],
"trace_id": self.trace_id,
}
],
parent_span_id=orphan_span_ids["root_span"],
span_id=orphan_span_ids["child"],
project_id=self.gen1_project.id,
# Because the snuba query orders based is_root then timestamp, this causes grandchild1-0 to be added to
# results first before child1-0
milliseconds=2000,
is_eap=True,
)
grandchild_event = self.create_event(
trace_id=self.trace_id,
transaction="/orphan/grandchild1-0",
spans=[
{
"same_process_as_parent": True,
"op": "http",
"description": "GET gen1 orphan",
"span_id": orphan_span_ids["grandchild_span"],
"trace_id": self.trace_id,
}
],
parent_span_id=orphan_span_ids["child_span"],
span_id=orphan_span_ids["grandchild"],
project_id=self.gen1_project.id,
milliseconds=1000,
is_eap=True,
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["transactions"]) == 2
# The first item of the response should be the main trace
main, orphans = response.data["transactions"]
self.assert_trace_data(main)
self.assert_event(orphans, root_event, "orphan-root")
assert len(orphans["children"]) == 1
if self.check_generation:
assert orphans["generation"] == 0
assert orphans["parent_event_id"] is None
child = orphans["children"][0]
self.assert_event(child, child_event, "orphan-child")
assert len(child["children"]) == 1
if self.check_generation:
assert child["generation"] == 1
assert child["parent_event_id"] == root_event.event_id
grandchild = child["children"][0]
self.assert_event(grandchild, grandchild_event, "orphan-grandchild")
if self.check_generation:
assert grandchild["generation"] == 2
assert grandchild["parent_event_id"] == child_event.event_id
def test_with_errors(self) -> None:
self.load_trace()
error, error1, _ = self.load_errors(self.gen1_project, self.gen1_span_ids[0])
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
self.assert_trace_data(response.data["transactions"][0])
gen1_event = response.data["transactions"][0]["children"][0]
assert len(gen1_event["errors"]) == 3
data = {
"event_id": error.event_id,
"issue_id": error.group_id,
"span": self.gen1_span_ids[0],
"project_id": self.gen1_project.id,
"project_slug": self.gen1_project.slug,
"level": "fatal",
"title": error.title,
"timestamp": datetime.fromisoformat(error.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error.search_message,
}
data1 = {
"event_id": error1.event_id,
"issue_id": error1.group_id,
"span": self.gen1_span_ids[0],
"project_id": self.gen1_project.id,
"project_slug": self.gen1_project.slug,
"level": "warning",
"title": error1.title,
"timestamp": datetime.fromisoformat(error1.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error1.search_message,
}
assert data in gen1_event["errors"]
assert data1 in gen1_event["errors"]
def test_with_only_orphan_errors_with_same_span_ids(self) -> None:
span_id = uuid4().hex[:16]
start, end = self.get_start_end_from_day_ago(10000)
# Error 1
error_data = load_data(
"javascript",
timestamp=end,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id,
}
error_data["level"] = "fatal"
error = self.store_event(error_data, project_id=self.project.id)
# Error 2 before after Error 1
error_data1 = load_data(
"javascript",
timestamp=start,
)
error_data1["level"] = "warning"
error_data1["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id,
}
error1 = self.store_event(error_data1, project_id=self.project.id)
with self.feature([*self.FEATURES]):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data) == 2
# Sorting by timestamp puts Error1 after Error2 in the response
assert {
"event_id": error.event_id,
"issue_id": error.group_id,
"span": span_id,
"project_id": self.project.id,
"project_slug": self.project.slug,
"level": "fatal",
"title": error.title,
"timestamp": datetime.fromisoformat(error.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error.search_message,
} == response.data["orphan_errors"][1]
assert {
"event_id": error1.event_id,
"issue_id": error1.group_id,
"span": span_id,
"project_id": self.project.id,
"project_slug": self.project.slug,
"level": "warning",
"title": error1.title,
"timestamp": datetime.fromisoformat(error1.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error1.search_message,
} == response.data["orphan_errors"][0]
def test_with_only_orphan_errors_with_different_span_ids(self) -> None:
start, _ = self.get_start_end_from_day_ago(1000)
span_id = uuid4().hex[:16]
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id,
}
error_data["level"] = "fatal"
error = self.store_event(error_data, project_id=self.project.id)
error_data["level"] = "warning"
span_id1 = uuid4().hex[:16]
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id1,
}
error1 = self.store_event(error_data, project_id=self.project.id)
with self.feature([*self.FEATURES]):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["orphan_errors"]) == 2
assert {
"event_id": error.event_id,
"issue_id": error.group_id,
"span": span_id,
"project_id": self.project.id,
"project_slug": self.project.slug,
"level": "fatal",
"title": error.title,
"timestamp": datetime.fromisoformat(error.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error.search_message,
} in response.data["orphan_errors"]
assert {
"event_id": error1.event_id,
"issue_id": error1.group_id,
"span": span_id1,
"project_id": self.project.id,
"project_slug": self.project.slug,
"level": "warning",
"title": error1.title,
"timestamp": datetime.fromisoformat(error1.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error1.search_message,
} in response.data["orphan_errors"]
def test_with_mixup_of_orphan_errors_with_simple_trace_data(self) -> None:
self.load_trace()
start, _ = self.get_start_end_from_day_ago(1000)
span_id = uuid4().hex[:16]
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id,
}
error_data["level"] = "fatal"
error = self.store_event(error_data, project_id=self.project.id)
error_data["level"] = "warning"
span_id1 = uuid4().hex[:16]
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": span_id1,
}
with self.feature([*self.FEATURES]):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
assert len(response.data["transactions"]) == 1
assert len(response.data["orphan_errors"]) == 1
self.assert_trace_data(response.data["transactions"][0])
assert {
"event_id": error.event_id,
"issue_id": error.group_id,
"span": span_id,
"project_id": self.project.id,
"project_slug": self.project.slug,
"level": "fatal",
"title": error.title,
"timestamp": datetime.fromisoformat(error.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": error.search_message,
} in response.data["orphan_errors"]
@pytest.mark.skip(reason="flaky: #84070")
def test_with_default(self) -> None:
self.load_trace()
start, _ = self.get_start_end_from_day_ago(1000)
default_event = self.load_default()
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1},
)
assert response.status_code == 200, response.content
self.assert_trace_data(response.data["transactions"][0])
root_event = response.data["transactions"][0]
assert len(root_event["errors"]) == 1
assert {
"event_id": default_event.event_id,
"issue_id": default_event.group_id,
"span": self.root_span_ids[0],
"project_id": self.gen1_project.id,
"project_slug": self.gen1_project.slug,
"level": "debug",
"title": "this is a log message",
"timestamp": datetime.fromisoformat(default_event.timestamp).timestamp(),
"generation": 0,
"event_type": "error",
"message": default_event.search_message,
} in root_event["errors"]
def test_pruning_root(self) -> None:
self.load_trace()
# Pruning shouldn't happen for the root event
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1, "event_id": self.root_event.event_id},
)
assert response.status_code == 200, response.content
self.assert_trace_data(response.data["transactions"][0])
def test_pruning_event(self) -> None:
self.load_trace()
with self.feature(self.FEATURES):
response = self.client_get(
data={"project": -1, "event_id": self.gen2_events[0].event_id},
)
assert response.status_code == 200, response.content
root = response.data["transactions"][0]
self.assert_event(root, self.root_event, "root")
# Because of snuba query orders by timestamp we should still have all of the root's children
assert len(root["children"]) == 3
for i, gen1 in enumerate(root["children"]):
self.assert_event(gen1, self.gen1_events[i], f"gen1_{i}")
if i == 0:
assert len(gen1["children"]) == 1
gen2 = gen1["children"][0]
self.assert_event(gen2, self.gen2_events[0], "gen2_0")
assert len(gen2["children"]) == 1
gen3 = gen2["children"][0]
self.assert_event(gen3, self.gen3_event, "gen3_0")
else:
assert len(gen1["children"]) == 0
@mock.patch("sentry.api.endpoints.organization_events_trace.query_trace_data")
def test_timestamp_optimization(self, mock_query: mock.MagicMock) -> None:
"""When timestamp is passed we'll ignore the statsPeriod and make a query with a smaller start & end"""
self.load_trace()
with self.feature(self.FEATURES):
self.client_get(
data={
"project": -1,
"timestamp": self.root_event.timestamp,
"statsPeriod": "90d",
},
)
mock_query.assert_called_once()
params = mock_query.call_args.args[1]
assert abs((params.end - params.start).days) <= 7
def test_timestamp_optimization_without_mock(self) -> None:
"""Make sure that even if the params are smaller the query still works"""
self.load_trace()
with self.feature(self.FEATURES):
response = self.client_get(
data={
"project": -1,
"timestamp": self.root_event.timestamp,
"statsPeriod": "90d",
},
)
assert response.status_code == 200, response.content
trace_transaction = response.data["transactions"][0]
self.assert_trace_data(trace_transaction)
# We shouldn't have detailed fields here
assert "transaction.status" not in trace_transaction
assert "tags" not in trace_transaction
assert "measurements" not in trace_transaction
| OrganizationEventsTraceEndpointTest |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 10979,
"end": 21352
} | class ____(nn.Module):
def __init__(
self,
config: SwitchTransformersConfig,
has_relative_attention_bias=False,
layer_idx: Optional[int] = None,
):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.gradient_checkpointing = False
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_values=None,
query_length=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
batch_size, seq_length = hidden_states.shape[:2]
# if key_value_states are provided this layer is used as a cross-attention layer for the decoder
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
# Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
# cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device, cache_position=cache_position
)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, : key_states.shape[-2]]
position_bias = position_bias + causal_mask
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
| SwitchTransformersAttention |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 69318,
"end": 71596
} | class ____(EditTool, Drag, Tap):
''' *toolbar icon*: |point_draw_icon|
The PointDrawTool allows adding, dragging and deleting point-like glyphs
(i.e subclasses of ``XYGlyph``) on one or more renderers by editing the
underlying ``ColumnDataSource`` data. Like other drawing tools, the
renderers that are to be edited must be supplied explicitly as a list. Any
newly added points will be inserted on the ``ColumnDataSource`` of the
first supplied renderer.
The tool will modify the columns on the data source corresponding to the
``x`` and ``y`` values of the glyph. Any additional columns in the data
source will be padded with the given ``empty_value`` when adding a new
point.
.. note::
The data source updates will trigger data change events continuously
throughout the edit operations on the BokehJS side. In Bokeh server
apps, the data source will only be synced once, when the edit operation
finishes.
The supported actions include:
* Add point: Tap anywhere on the plot
* Move point: Tap and drag an existing point, the point will be
dropped once you let go of the mouse button.
* Delete point: Tap a point to select it then press BACKSPACE
key while the mouse is within the plot area.
.. |point_draw_icon| image:: /_images/icons/point-draw.svg
:height: 24px
:alt: Icon of three points with an arrow pointing to one representing the point-edit tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
renderers = List(GlyphRendererOf(XYGlyph), help="""
A list of renderers corresponding to glyphs that may be edited.
""")
add = Bool(default=True, help="""
Enables adding of new points on tap events.
""")
drag = Bool(default=True, help="""
Enables dragging of existing points on pan events.
""")
num_objects = Int(default=0, help="""
Defines a limit on the number of points that can be drawn. By default there
is no limit on the number of objects, but if enabled the oldest drawn point
will be dropped to make space for the new point.
""")
| PointDrawTool |
python | pytorch__pytorch | torch/_inductor/index_propagation.py | {
"start": 2396,
"end": 5873
} | class ____:
"""An ops handler where all IR values are SymPy expressions
When a value cannot be represented as a SymPy expression, the method is
either not defined, or returns NotImplemented
"""
@staticmethod
def identity(value: Any) -> Any:
return value
@staticmethod
def constant(value: Union[int, float, bool], dtype: torch.dtype) -> TypedExpr:
return TypedExpr(value, dtype)
@staticmethod
def index_expr(value: Union[sympy.Expr, int], dtype: torch.dtype) -> TypedExpr:
return TypedExpr(value, dtype)
@staticmethod
def to_dtype(
value: TypedExpr,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types: bool = False,
) -> TypedExpr:
return TypedExpr(value.expr, dtype)
@staticmethod
def abs(x: TypedExpr) -> TypedExpr:
return TypedExpr(abs(x.expr), x.dtype) # type: ignore[arg-type]
@staticmethod
def square(x: TypedExpr) -> TypedExpr:
return TypedExpr(x.expr * x.expr, x.dtype)
@staticmethod
def add(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
return TypedExpr(x.expr + y.expr, result_type)
@staticmethod
def sub(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
return TypedExpr(x.expr - y.expr, result_type)
@staticmethod
def mul(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
return TypedExpr(x.expr * y.expr, result_type)
@staticmethod
def neg(x: TypedExpr) -> TypedExpr:
return TypedExpr(-x.expr, x.dtype)
@staticmethod
def floordiv(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
if not is_integer_dtype(result_type):
return NotImplemented
return TypedExpr(FloorDiv(x.expr, y.expr), result_type)
@staticmethod
def mod(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
result_type = torch.promote_types(x.dtype, y.dtype)
if not is_integer_dtype(result_type):
return NotImplemented
result_expr = ModularIndexing(x.expr, sympy.S.One, y.expr)
return TypedExpr(result_expr, result_type)
@staticmethod
def remainder(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
result_type = torch.promote_types(x.dtype, y.dtype)
if not is_integer_dtype(result_type):
return NotImplemented
x_expr = sympy.sympify(x.expr)
y_expr = sympy.sympify(y.expr)
# In these cases, remainder in Python == remainder in C++, so this transformation
# is sound
if (
x_expr.is_nonnegative is not None
and x_expr.is_nonnegative == y_expr.is_positive
):
result_expr = ModularIndexing(x.expr, sympy.S.One, y.expr)
return TypedExpr(result_expr, result_type)
return NotImplemented
@staticmethod
def minimum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
return TypedExpr(sympy.Min(x.expr, y.expr), result_type)
@staticmethod
def maximum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
result_type = torch.promote_types(x.dtype, y.dtype)
return TypedExpr(sympy.Max(x.expr, y.expr), result_type)
@dataclass
| SymPyOps |
python | django__django | django/views/generic/base.py | {
"start": 990,
"end": 5925
} | class ____:
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = [
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
]
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in kwargs.items():
setattr(self, key, value)
@classproperty
def view_is_async(cls):
handlers = [
getattr(cls, method)
for method in cls.http_method_names
if (method != "options" and hasattr(cls, method))
]
if not handlers:
return False
is_async = iscoroutinefunction(handlers[0])
if not all(iscoroutinefunction(h) == is_async for h in handlers[1:]):
raise ImproperlyConfigured(
f"{cls.__qualname__} HTTP handlers must either be all sync or all "
"async."
)
return is_async
@classonlymethod
def as_view(cls, **initkwargs):
"""Main entry point for a request-response process."""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError(
"The method name %s is not accepted as a keyword argument "
"to %s()." % (key, cls.__name__)
)
if not hasattr(cls, key):
raise TypeError(
"%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key)
)
def view(request, *args, **kwargs):
self = cls(**initkwargs)
self.setup(request, *args, **kwargs)
if not hasattr(self, "request"):
raise AttributeError(
"%s instance has no 'request' attribute. Did you override "
"setup() and forget to call super()?" % cls.__name__
)
return self.dispatch(request, *args, **kwargs)
view.view_class = cls
view.view_initkwargs = initkwargs
# __name__ and __qualname__ are intentionally left unchanged as
# view_class should be used to robustly determine the name of the view
# instead.
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.__annotations__ = cls.dispatch.__annotations__
# Copy possible attributes set by decorators, e.g. @csrf_exempt, from
# the dispatch method.
view.__dict__.update(cls.dispatch.__dict__)
# Mark the callback if the view class is async.
if cls.view_is_async:
markcoroutinefunction(view)
return view
def setup(self, request, *args, **kwargs):
"""Initialize attributes shared by all view methods."""
if hasattr(self, "get") and not hasattr(self, "head"):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
method = request.method.lower()
if method in self.http_method_names:
handler = getattr(self, method, self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
response = HttpResponseNotAllowed(self._allowed_methods())
log_response(
"Method Not Allowed (%s): %s",
request.method,
request.path,
response=response,
request=request,
)
if self.view_is_async:
async def func():
return response
return func()
else:
return response
def options(self, request, *args, **kwargs):
"""Handle responding to requests for the OPTIONS HTTP verb."""
response = HttpResponse()
response.headers["Allow"] = ", ".join(self._allowed_methods())
response.headers["Content-Length"] = "0"
if self.view_is_async:
async def func():
return response
return func()
else:
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
| View |
python | pyca__cryptography | src/cryptography/x509/general_name.py | {
"start": 4983,
"end": 5623
} | class ____(GeneralName):
def __init__(self, value: ObjectIdentifier) -> None:
if not isinstance(value, ObjectIdentifier):
raise TypeError("value must be an ObjectIdentifier")
self._value = value
@property
def value(self) -> ObjectIdentifier:
return self._value
def __repr__(self) -> str:
return f"<RegisteredID(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, RegisteredID):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
| RegisteredID |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_schedule.py | {
"start": 6430,
"end": 8175
} | class ____(CreateEventTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.group = self.create_group(self.project)
self.rule = self.create_alert_rule()
self.project_client = self.batch_client.for_project(self.project.id)
def test_get_hash_data_basic(self) -> None:
self.push_to_hash(self.project.id, self.rule.id, self.group.id, "event-123")
result = self.project_client.get_hash_data(batch_key=None)
assert f"{self.rule.id}:{self.group.id}" in result
data = json.loads(result[f"{self.rule.id}:{self.group.id}"])
assert data["event_id"] == "event-123"
def test_get_hash_data_with_batch_key(self) -> None:
batch_key = str(uuid4())
self.push_to_hash(self.project.id, self.rule.id, self.group.id, "event-456")
# Move data to batch
original_data = self.project_client.get_hash_data(batch_key=None)
self.project_client.push_to_hash(batch_key=batch_key, data=original_data)
result = self.project_client.get_hash_data(batch_key=batch_key)
assert f"{self.rule.id}:{self.group.id}" in result
data = json.loads(result[f"{self.rule.id}:{self.group.id}"])
assert data["event_id"] == "event-456"
def run_to_timestamp(run: int, interval_sec: int, jitter: bool = True) -> float:
"""
Helper to provide timestamps for 'run every X seconds' scenarios.
If jitter_sec is provided, it will add a random jitter to the timestamp.
"""
value = float(run * interval_sec)
if jitter:
# +/- 2 seconds; not unreasonable for our scheduling crons.
value += random.choice((0, 2, -2))
return value
| FetchGroupToEventDataTest |
python | spack__spack | lib/spack/spack/vendor/jinja2/filters.py | {
"start": 34489,
"end": 52739
} | class ____(t.NamedTuple):
grouper: t.Any
list: t.List
# Use the regular tuple repr to hide this subclass if users print
# out the value during debugging.
def __repr__(self) -> str:
return tuple.__repr__(self)
def __str__(self) -> str:
return tuple.__str__(self)
@pass_environment
def sync_do_groupby(
environment: "Environment",
value: "t.Iterable[V]",
attribute: t.Union[str, int],
default: t.Optional[t.Any] = None,
) -> "t.List[t.Tuple[t.Any, t.List[V]]]":
"""Group a sequence of objects by an attribute using Python's
:func:`itertools.groupby`. The attribute can use dot notation for
nested access, like ``"address.city"``. Unlike Python's ``groupby``,
the values are sorted first so only one group is returned for each
unique value.
For example, a list of ``User`` objects with a ``city`` attribute
can be rendered in groups. In this example, ``grouper`` refers to
the ``city`` value of the group.
.. sourcecode:: html+jinja
<ul>{% for city, items in users|groupby("city") %}
<li>{{ city }}
<ul>{% for user in items %}
<li>{{ user.name }}
{% endfor %}</ul>
</li>
{% endfor %}</ul>
``groupby`` yields namedtuples of ``(grouper, list)``, which
can be used instead of the tuple unpacking above. ``grouper`` is the
value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
<ul>{% for group in users|groupby("city") %}
<li>{{ group.grouper }}: {{ group.list|join(", ") }}
{% endfor %}</ul>
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
<ul>{% for city, items in users|groupby("city", default="NY") %}
<li>{{ city }}: {{ items|map(attribute="name")|join(", ") }}</li>
{% endfor %}</ul>
.. versionchanged:: 3.0
Added the ``default`` parameter.
.. versionchanged:: 2.6
The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(environment, attribute, default=default)
return [
_GroupTuple(key, list(values))
for key, values in groupby(sorted(value, key=expr), expr)
]
@async_variant(sync_do_groupby) # type: ignore
async def do_groupby(
environment: "Environment",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
attribute: t.Union[str, int],
default: t.Optional[t.Any] = None,
) -> "t.List[t.Tuple[t.Any, t.List[V]]]":
expr = make_attrgetter(environment, attribute, default=default)
return [
_GroupTuple(key, await auto_to_list(values))
for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
]
@pass_environment
def sync_do_sum(
environment: "Environment",
iterable: "t.Iterable[V]",
attribute: t.Optional[t.Union[str, int]] = None,
start: V = 0, # type: ignore
) -> V:
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The `attribute` parameter was added to allow suming up over
attributes. Also the `start` parameter was moved on to the right.
"""
if attribute is not None:
iterable = map(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start)
@async_variant(sync_do_sum) # type: ignore
async def do_sum(
environment: "Environment",
iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
attribute: t.Optional[t.Union[str, int]] = None,
start: V = 0, # type: ignore
) -> V:
rv = start
if attribute is not None:
func = make_attrgetter(environment, attribute)
else:
def func(x: V) -> V:
return x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
def sync_do_list(value: "t.Iterable[V]") -> "t.List[V]":
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
@async_variant(sync_do_list) # type: ignore
async def do_list(value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]") -> "t.List[V]":
return await auto_to_list(value)
def do_mark_safe(value: str) -> Markup:
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value: str) -> str:
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return str(value)
@typing.overload
def do_reverse(value: str) -> str:
...
@typing.overload
def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]":
...
def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V]]:
"""Reverse the object or return an iterator that iterates over it the other
way round.
"""
if isinstance(value, str):
return value[::-1]
try:
return reversed(value) # type: ignore
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError as e:
raise FilterArgumentError("argument must be iterable") from e
@pass_environment
def do_attr(
environment: "Environment", obj: t.Any, name: str
) -> t.Union[Undefined, t.Any]:
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo.bar`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed:
environment = t.cast("SandboxedEnvironment", environment)
if not environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@typing.overload
def sync_do_map(
context: "Context", value: t.Iterable, name: str, *args: t.Any, **kwargs: t.Any
) -> t.Iterable:
...
@typing.overload
def sync_do_map(
context: "Context",
value: t.Iterable,
*,
attribute: str = ...,
default: t.Optional[t.Any] = None,
) -> t.Iterable:
...
@pass_context
def sync_do_map(
context: "Context", value: t.Iterable, *args: t.Any, **kwargs: t.Any
) -> t.Iterable:
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
{{ users|map(attribute="username", default="Anonymous")|join(", ") }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
Similar to a generator comprehension such as:
.. code-block:: python
(u.username for u in users)
(getattr(u, "username", "Anonymous") for u in users)
(do_lower(x) for x in titles)
.. versionchanged:: 2.11.0
Added the ``default`` parameter.
.. versionadded:: 2.7
"""
if value:
func = prepare_map(context, args, kwargs)
for item in value:
yield func(item)
@typing.overload
def do_map(
context: "Context",
value: t.Union[t.AsyncIterable, t.Iterable],
name: str,
*args: t.Any,
**kwargs: t.Any,
) -> t.Iterable:
...
@typing.overload
def do_map(
context: "Context",
value: t.Union[t.AsyncIterable, t.Iterable],
*,
attribute: str = ...,
default: t.Optional[t.Any] = None,
) -> t.Iterable:
...
@async_variant(sync_do_map) # type: ignore
async def do_map(
context: "Context",
value: t.Union[t.AsyncIterable, t.Iterable],
*args: t.Any,
**kwargs: t.Any,
) -> t.AsyncIterable:
if value:
func = prepare_map(context, args, kwargs)
async for item in auto_aiter(value):
yield await auto_await(func(item))
@pass_context
def sync_do_select(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and only selecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|select("odd") }}
{{ numbers|select("odd") }}
{{ numbers|select("divisibleby", 3) }}
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if test_odd(n))
(n for n in numbers if test_divisibleby(n, 3))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, False)
@async_variant(sync_do_select) # type: ignore
async def do_select(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, False)
@pass_context
def sync_do_reject(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and rejecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|reject("odd") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if not test_odd(n))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@async_variant(sync_do_reject) # type: ignore
async def do_reject(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@pass_context
def sync_do_selectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and only selecting the objects with the
test succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
Example usage:
.. sourcecode:: jinja
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(u for user in users if user.is_active)
(u for user in users if test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, True)
@async_variant(sync_do_selectattr) # type: ignore
async def do_selectattr(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, True)
@pass_context
def sync_do_rejectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and rejecting the objects with the test
succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
.. sourcecode:: jinja
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(u for user in users if not user.is_active)
(u for user in users if not test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@async_variant(sync_do_rejectattr) # type: ignore
async def do_rejectattr(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@pass_eval_context
def do_tojson(
eval_ctx: "EvalContext", value: t.Any, indent: t.Optional[int] = None
) -> Markup:
"""Serialize an object to a string of JSON, and mark it safe to
render in HTML. This filter is only for use in HTML documents.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param value: The object to serialize to JSON.
:param indent: The ``indent`` parameter passed to ``dumps``, for
pretty-printing the value.
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
dumps = policies["json.dumps_function"]
kwargs = policies["json.dumps_kwargs"]
if indent is not None:
kwargs = kwargs.copy()
kwargs["indent"] = indent
return htmlsafe_json_dumps(value, dumps=dumps, **kwargs)
def prepare_map(
context: "Context", args: t.Tuple, kwargs: t.Dict[str, t.Any]
) -> t.Callable[[t.Any], t.Any]:
if not args and "attribute" in kwargs:
attribute = kwargs.pop("attribute")
default = kwargs.pop("default", None)
if kwargs:
raise FilterArgumentError(
f"Unexpected keyword argument {next(iter(kwargs))!r}"
)
func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[0]
args = args[1:]
except LookupError:
raise FilterArgumentError("map requires a filter argument") from None
def func(item: t.Any) -> t.Any:
return context.environment.call_filter(
name, item, args, kwargs, context=context
)
return func
def prepare_select_or_reject(
context: "Context",
args: t.Tuple,
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> t.Callable[[t.Any], t.Any]:
if lookup_attr:
try:
attr = args[0]
except LookupError:
raise FilterArgumentError("Missing parameter for attribute name") from None
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
def transfunc(x: V) -> V:
return x
try:
name = args[off]
args = args[1 + off :]
def func(item: t.Any) -> t.Any:
return context.environment.call_test(name, item, args, kwargs)
except LookupError:
func = bool # type: ignore
return lambda item: modfunc(func(transfunc(item)))
def select_or_reject(
context: "Context",
value: "t.Iterable[V]",
args: t.Tuple,
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.Iterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
for item in value:
if func(item):
yield item
async def async_select_or_reject(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
args: t.Tuple,
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.AsyncIterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
async for item in auto_aiter(value):
if func(item):
yield item
FILTERS = {
"abs": abs,
"attr": do_attr,
"batch": do_batch,
"capitalize": do_capitalize,
"center": do_center,
"count": len,
"d": do_default,
"default": do_default,
"dictsort": do_dictsort,
"e": escape,
"escape": escape,
"filesizeformat": do_filesizeformat,
"first": do_first,
"float": do_float,
"forceescape": do_forceescape,
"format": do_format,
"groupby": do_groupby,
"indent": do_indent,
"int": do_int,
"join": do_join,
"last": do_last,
"length": len,
"list": do_list,
"lower": do_lower,
"map": do_map,
"min": do_min,
"max": do_max,
"pprint": do_pprint,
"random": do_random,
"reject": do_reject,
"rejectattr": do_rejectattr,
"replace": do_replace,
"reverse": do_reverse,
"round": do_round,
"safe": do_mark_safe,
"select": do_select,
"selectattr": do_selectattr,
"slice": do_slice,
"sort": do_sort,
"string": soft_str,
"striptags": do_striptags,
"sum": do_sum,
"title": do_title,
"trim": do_trim,
"truncate": do_truncate,
"unique": do_unique,
"upper": do_upper,
"urlencode": do_urlencode,
"urlize": do_urlize,
"wordcount": do_wordcount,
"wordwrap": do_wordwrap,
"xmlattr": do_xmlattr,
"tojson": do_tojson,
}
| _GroupTuple |
python | spack__spack | lib/spack/spack/vendor/jinja2/lexer.py | {
"start": 13414,
"end": 13562
} | class ____(t.NamedTuple):
pattern: t.Pattern[str]
tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]
command: t.Optional[str]
| _Rule |
python | sympy__sympy | sympy/physics/quantum/sho1d.py | {
"start": 16509,
"end": 16730
} | class ____(State):
"""State class for SHO states"""
@classmethod
def _eval_hilbert_space(cls, label):
return ComplexSpace(S.Infinity)
@property
def n(self):
return self.args[0]
| SHOState |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 2789,
"end": 2907
} | class ____(models.Model):
name = models.CharField(max_length=15)
def __bool__(self):
return False
| Guild |
python | encode__django-rest-framework | tests/test_serializer_nested.py | {
"start": 2498,
"end": 6118
} | class ____:
def setup_method(self):
class NestedSerializer(serializers.Serializer):
example = serializers.IntegerField(max_value=10)
class TestSerializer(serializers.Serializer):
allow_null = NestedSerializer(many=True, allow_null=True)
not_allow_null = NestedSerializer(many=True)
allow_empty = NestedSerializer(many=True, allow_empty=True)
not_allow_empty = NestedSerializer(many=True, allow_empty=False)
self.Serializer = TestSerializer
def test_null_allowed_if_allow_null_is_set(self):
input_data = {
'allow_null': None,
'not_allow_null': [{'example': '2'}, {'example': '3'}],
'allow_empty': [{'example': '2'}],
'not_allow_empty': [{'example': '2'}],
}
expected_data = {
'allow_null': None,
'not_allow_null': [{'example': 2}, {'example': 3}],
'allow_empty': [{'example': 2}],
'not_allow_empty': [{'example': 2}],
}
serializer = self.Serializer(data=input_data)
assert serializer.is_valid(), serializer.errors
assert serializer.validated_data == expected_data
def test_null_is_not_allowed_if_allow_null_is_not_set(self):
input_data = {
'allow_null': None,
'not_allow_null': None,
'allow_empty': [{'example': '2'}],
'not_allow_empty': [{'example': '2'}],
}
serializer = self.Serializer(data=input_data)
assert not serializer.is_valid()
expected_errors = {'not_allow_null': [serializer.error_messages['null']]}
assert serializer.errors == expected_errors
def test_run_the_field_validation_even_if_the_field_is_null(self):
class TestSerializer(self.Serializer):
validation_was_run = False
def validate_allow_null(self, value):
TestSerializer.validation_was_run = True
return value
input_data = {
'allow_null': None,
'not_allow_null': [{'example': 2}],
'allow_empty': [{'example': 2}],
'not_allow_empty': [{'example': 2}],
}
serializer = TestSerializer(data=input_data)
assert serializer.is_valid()
assert serializer.validated_data == input_data
assert TestSerializer.validation_was_run
def test_empty_allowed_if_allow_empty_is_set(self):
input_data = {
'allow_null': [{'example': '2'}],
'not_allow_null': [{'example': '2'}],
'allow_empty': [],
'not_allow_empty': [{'example': '2'}],
}
expected_data = {
'allow_null': [{'example': 2}],
'not_allow_null': [{'example': 2}],
'allow_empty': [],
'not_allow_empty': [{'example': 2}],
}
serializer = self.Serializer(data=input_data)
assert serializer.is_valid(), serializer.errors
assert serializer.validated_data == expected_data
def test_empty_not_allowed_if_allow_empty_is_set_to_false(self):
input_data = {
'allow_null': [{'example': '2'}],
'not_allow_null': [{'example': '2'}],
'allow_empty': [],
'not_allow_empty': [],
}
serializer = self.Serializer(data=input_data)
assert not serializer.is_valid()
expected_errors = {'not_allow_empty': {'non_field_errors': [serializers.ListSerializer.default_error_messages['empty']]}}
assert serializer.errors == expected_errors
| TestNestedSerializerWithMany |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 8350,
"end": 8731
} | class ____(OAuth2Error):
"""
The authorization server is currently unable to handle the request
due to a temporary overloading or maintenance of the server.
(This error code is needed because a 503 Service Unavailable HTTP
status code cannot be returned to the client via a HTTP redirect.)
"""
error = 'temporarily_unavailable'
| TemporarilyUnavailableError |
python | doocs__leetcode | solution/1600-1699/1691.Maximum Height by Stacking Cuboids/Solution.py | {
"start": 0,
"end": 432
} | class ____:
def maxHeight(self, cuboids: List[List[int]]) -> int:
for c in cuboids:
c.sort()
cuboids.sort()
n = len(cuboids)
f = [0] * n
for i in range(n):
for j in range(i):
if cuboids[j][1] <= cuboids[i][1] and cuboids[j][2] <= cuboids[i][2]:
f[i] = max(f[i], f[j])
f[i] += cuboids[i][2]
return max(f)
| Solution |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 34352,
"end": 39898
} | class ____:
def test_wrong_inputs(self):
# Nonsquare a
assert_raises(ValueError, eigh, np.ones([1, 2]))
# Nonsquare b
assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1]))
# Incompatible a, b sizes
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2]))
# Wrong type parameter for generalized problem
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
type=4)
# Both value and index subsets requested
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
subset_by_value=[1, 2], subset_by_index=[2, 4])
# Invalid upper index spec
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
subset_by_index=[0, 4])
# Invalid lower index
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
subset_by_index=[-2, 2])
# Invalid index spec #2
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
subset_by_index=[2, 0])
# Invalid value spec
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
subset_by_value=[2, 0])
# Invalid driver name
assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong')
# Generalized driver selection without b
assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx')
# Standard driver with b
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
driver='evr')
# Subset request from invalid driver
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
driver='gvd', subset_by_index=[1, 2])
assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
driver='gvd', subset_by_index=[1, 2])
def test_nonpositive_b(self):
assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3]))
# index based subsets are done in the legacy test_eigh()
def test_value_subsets(self):
for ind, dt in enumerate(DTYPES):
a = _random_hermitian_matrix(20, dtype=dt)
w, v = eigh(a, subset_by_value=[-2, 2])
assert_equal(v.shape[1], len(w))
assert all((w > -2) & (w < 2))
b = _random_hermitian_matrix(20, posdef=True, dtype=dt)
w, v = eigh(a, b, subset_by_value=[-2, 2])
assert_equal(v.shape[1], len(w))
assert all((w > -2) & (w < 2))
def test_eigh_integer(self):
a = array([[1, 2], [2, 7]])
b = array([[3, 1], [1, 5]])
w, z = eigh(a)
w, z = eigh(a, b)
@skip_xp_invalid_arg
def test_eigh_of_sparse(self):
# This tests the rejection of inputs that eigh cannot currently handle.
import scipy.sparse
a = scipy.sparse.identity(2).tocsc()
b = np.atleast_2d(a)
assert_raises(ValueError, eigh, a)
assert_raises(ValueError, eigh, b)
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx"))
def test_various_drivers_standard(self, driver, dtype_):
a = _random_hermitian_matrix(n=20, dtype=dtype_)
w, v = eigh(a, driver=driver)
assert_allclose(a @ v - (v * w), 0.,
atol=1000*np.finfo(dtype_).eps,
rtol=0.)
@pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx"))
def test_1x1_lwork(self, driver):
w, v = eigh([[1]], driver=driver)
assert_allclose(w, array([1.]), atol=1e-15)
assert_allclose(v, array([[1.]]), atol=1e-15)
# complex case now
w, v = eigh([[1j]], driver=driver)
assert_allclose(w, array([0]), atol=1e-15)
assert_allclose(v, array([[1.]]), atol=1e-15)
@pytest.mark.parametrize('type', (1, 2, 3))
@pytest.mark.parametrize('driver', ("gv", "gvd", "gvx"))
def test_various_drivers_generalized(self, driver, type):
atol = np.spacing(5000.)
a = _random_hermitian_matrix(20)
b = _random_hermitian_matrix(20, posdef=True)
w, v = eigh(a=a, b=b, driver=driver, type=type)
if type == 1:
assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.)
elif type == 2:
assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.)
else:
assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.)
def test_eigvalsh_new_args(self):
a = _random_hermitian_matrix(5)
w = eigvalsh(a, subset_by_index=[1, 2])
assert_equal(len(w), 2)
w2 = eigvalsh(a, subset_by_index=[1, 2])
assert_equal(len(w2), 2)
assert_allclose(w, w2)
b = np.diag([1, 1.2, 1.3, 1.5, 2])
w3 = eigvalsh(b, subset_by_value=[1, 1.4])
assert_equal(len(w3), 2)
assert_allclose(w3, np.array([1.2, 1.3]))
@pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt):
a = np.empty((0, 0), dtype=dt)
w, v = eigh(a)
w_n, v_n = eigh(np.eye(2, dtype=dt))
assert w.shape == (0,)
assert w.dtype == w_n.dtype
assert v.shape == (0, 0)
assert v.dtype == v_n.dtype
w = eigh(a, eigvals_only=True)
assert_allclose(w, np.empty((0,)))
assert w.shape == (0,)
assert w.dtype == w_n.dtype
| TestEigh |
python | rapidsai__cudf | python/cudf/cudf/testing/dataset_generator.py | {
"start": 582,
"end": 2271
} | class ____:
"""Parameters for generating column of data
Attributes
----------
cardinality : int or None
Size of a random set of values that generated data is sampled from.
The values in the random set are derived from the given generator.
If cardinality is None, the Iterable returned by the given generator
is invoked for each value to be generated.
null_frequency : 0.1
Probability of a generated value being null
generator : Callable
Function for generating random data.
is_sorted : bool
Sort this column. Columns are sorted in same order as ColumnParameters
instances stored in column_params of Parameters. If there are one or
more columns marked as sorted, the generated PyArrow Table will be
converted to a Pandas DataFrame to do the sorting. This may implicitly
convert numbers to floats in the presence of nulls.
dtype : optional
a numpy dtype to control the format of the data
"""
def __init__(
self,
cardinality=100,
null_frequency=0.1,
generator=None,
is_sorted=True,
dtype=None,
):
self.cardinality = cardinality
self.null_frequency = null_frequency
if generator is None:
rng = np.random.default_rng(seed=0)
self.generator = lambda: [
_generate_string(
string.ascii_letters, rng, rng.integers(4, 8).item()
)
for _ in range(100)
]
else:
self.generator = generator
self.is_sorted = is_sorted
self.dtype = dtype
| ColumnParameters |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 111372,
"end": 112722
} | class ____(nn.Module):
def __init__(
self,
transformer_width=256,
transformer_heads=4,
transformer_layers=6,
visual_dim=1024,
dropout=0.1,
layer_norm_eps=1e-05,
**kwargs,
):
super().__init__()
self.memory_proj = nn.Sequential(
nn.LayerNorm(visual_dim, eps=layer_norm_eps),
nn.Linear(visual_dim, transformer_width),
nn.LayerNorm(transformer_width, eps=layer_norm_eps),
)
self.text_proj = nn.Sequential(
nn.LayerNorm(visual_dim, eps=layer_norm_eps),
nn.Linear(visual_dim, transformer_width),
)
self.decoder = nn.ModuleList(
[
OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout, layer_norm_eps)
for _ in range(transformer_layers)
]
)
self.out_proj = nn.Sequential(
nn.LayerNorm(transformer_width, eps=layer_norm_eps), nn.Linear(transformer_width, visual_dim)
)
def forward(self, text, visual):
visual = self.memory_proj(visual)
hidden_state = self.text_proj(text)
for layer in self.decoder:
hidden_state = layer(hidden_state, visual)
return self.out_proj(hidden_state)
| OneFormerTextContextDecoder |
python | huggingface__transformers | src/transformers/models/tapas/tokenization_tapas.py | {
"start": 98573,
"end": 101020
} | class ____:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through *BasicTokenizer*.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
# Below: utilities for TAPAS tokenizer
# This includes functions to parse numeric values (dates and numbers) from both the table and questions in order
# to create the column_ranks, inv_column_ranks, numeric_values, numeric values_scale and numeric_relations in
# prepare_for_model of TapasTokenizer.
# These are meant to be used in an academic setup, for production use cases Gold mine or Aqua should be used.
# taken from constants.py of the original implementation
# URL: https://github.com/google-research/tapas/blob/master/tapas/utils/constants.py
| WordpieceTokenizer |
python | matplotlib__matplotlib | lib/matplotlib/offsetbox.py | {
"start": 26610,
"end": 29447
} | class ____(OffsetBox):
"""
An OffsetBox with an auxiliary transform.
All child artists are first transformed with *aux_transform*, then
translated with an offset (the same for all children) so the bounding
box of the children matches the drawn box. (In other words, adding an
arbitrary translation to *aux_transform* has no effect as it will be
cancelled out by the later offsetting.)
`AuxTransformBox` is similar to `.DrawingArea`, except that the extent of
the box is not predetermined but calculated from the window extent of its
children, and the extent of the children will be calculated in the
transformed coordinate.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
super().__init__()
self.offset_transform = mtransforms.Affine2D()
# ref_offset_transform makes offset_transform always relative to the
# lower-left corner of the bbox of its children.
self.ref_offset_transform = mtransforms.Affine2D()
def add_artist(self, a):
"""Add an `.Artist` to the container box."""
self._children.append(a)
a.set_transform(self.get_transform())
self.stale = True
def get_transform(self):
"""Return the `.Transform` applied to the children."""
return (self.aux_transform
+ self.ref_offset_transform
+ self.offset_transform)
def set_transform(self, t):
"""
set_transform is ignored.
"""
def set_offset(self, xy):
"""
Set the offset of the container.
Parameters
----------
xy : (float, float)
The (x, y) coordinates of the offset in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""Return offset of the container."""
return self._offset
def get_bbox(self, renderer):
# clear the offset transforms
_off = self.offset_transform.get_matrix() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = Bbox.union(bboxes)
# adjust ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
# restore offset transform
self.offset_transform.set_matrix(_off)
return Bbox.from_bounds(0, 0, ub.width, ub.height)
def draw(self, renderer):
# docstring inherited
for c in self._children:
c.draw(renderer)
_bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
| AuxTransformBox |
python | openai__openai-python | src/openai/resources/videos.py | {
"start": 29443,
"end": 30223
} | class ____:
def __init__(self, videos: AsyncVideos) -> None:
self._videos = videos
self.create = _legacy_response.async_to_raw_response_wrapper(
videos.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
videos.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
videos.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
videos.delete,
)
self.download_content = _legacy_response.async_to_raw_response_wrapper(
videos.download_content,
)
self.remix = _legacy_response.async_to_raw_response_wrapper(
videos.remix,
)
| AsyncVideosWithRawResponse |
python | ethereum__web3.py | web3/providers/persistent/subscription_manager.py | {
"start": 783,
"end": 13718
} | class ____:
"""
The ``SubscriptionManager`` is responsible for subscribing, unsubscribing, and
managing all active subscriptions for an ``AsyncWeb3`` instance. It is also
used for processing all subscriptions that have handler functions.
"""
logger: logging.Logger = logging.getLogger(
"web3.providers.persistent.subscription_manager"
)
def __init__(self, w3: "AsyncWeb3[Any]") -> None:
self._w3 = w3
self._provider = cast("PersistentConnectionProvider", w3.provider)
self._subscription_container = SubscriptionContainer()
# parallelize all subscription handler calls
self.parallelize = False
self.task_timeout = 1
self._tasks: set[asyncio.Task[None]] = set()
# share the subscription container with the request processor so it can separate
# subscriptions into different queues based on ``sub._handler`` presence
self._provider._request_processor._subscription_container = (
self._subscription_container
)
self.total_handler_calls: int = 0
def _add_subscription(self, subscription: EthSubscription[Any]) -> None:
self._subscription_container.add_subscription(subscription)
def _remove_subscription(self, subscription: EthSubscription[Any]) -> None:
self._subscription_container.remove_subscription(subscription)
def _validate_and_normalize_label(self, subscription: EthSubscription[Any]) -> None:
if subscription.label == subscription._default_label:
# if no custom label was provided, generate a unique label
i = 2
while self.get_by_label(subscription._label) is not None:
subscription._label = f"{subscription._default_label}#{i}"
i += 1
else:
if (
subscription._label
in self._subscription_container.subscriptions_by_label
):
raise Web3ValueError(
"Subscription label already exists. Subscriptions must have unique "
f"labels.\n label: {subscription._label}"
)
def _handler_task_callback(self, task: asyncio.Task[None]) -> None:
"""
Callback when a handler task completes. Similar to _message_listener_callback.
Puts handler exceptions into the queue to be raised in the main loop, else
removes the task from the set of active tasks.
"""
if task.done() and not task.cancelled():
try:
task.result()
self._tasks.discard(task)
except Exception as e:
self.logger.exception("Subscription handler task raised an exception.")
self._provider._request_processor._handler_subscription_queue.put_nowait( # noqa: E501
SubscriptionHandlerTaskException(task, message=str(e))
)
async def _cleanup_remaining_tasks(self) -> None:
"""Cancel and clean up all remaining tasks."""
if not self._tasks:
return
self.logger.debug("Cleaning up %d remaining tasks...", len(self._tasks))
for task in self._tasks:
if not task.done():
task.cancel()
self._tasks.clear()
@property
def subscriptions(self) -> list[EthSubscription[Any]]:
return self._subscription_container.subscriptions
def get_by_id(self, sub_id: HexStr) -> EthSubscription[Any]:
return self._subscription_container.get_by_id(sub_id)
def get_by_label(self, label: str) -> EthSubscription[Any]:
return self._subscription_container.get_by_label(label)
@overload
async def subscribe(self, subscriptions: EthSubscription[Any]) -> HexStr:
...
@overload
async def subscribe(
self, subscriptions: Sequence[EthSubscription[Any]]
) -> list[HexStr]:
...
async def subscribe(
self,
subscriptions: EthSubscription[Any] | Sequence[EthSubscription[Any]],
) -> HexStr | list[HexStr]:
"""
Used to subscribe to a single or multiple subscriptions.
:param subscriptions: A single subscription or a sequence of subscriptions.
:type subscriptions: Union[EthSubscription, Sequence[EthSubscription]]
:return:
"""
if isinstance(subscriptions, EthSubscription):
subscriptions.manager = self
self._validate_and_normalize_label(subscriptions)
sub_id = await self._w3.eth._subscribe(*subscriptions.subscription_params)
subscriptions._id = sub_id
self._add_subscription(subscriptions)
self.logger.info(
"Successfully subscribed to subscription:\n label: %s\n id: %s",
subscriptions.label,
sub_id,
)
return sub_id
elif isinstance(subscriptions, Sequence):
if len(subscriptions) == 0:
raise Web3ValueError("No subscriptions provided.")
sub_ids: list[HexStr] = []
for sub in subscriptions:
sub_ids.append(await self.subscribe(sub))
return sub_ids
raise Web3TypeError("Expected a Subscription or a sequence of Subscriptions.")
@overload
async def unsubscribe(self, subscriptions: EthSubscription[Any]) -> bool:
...
@overload
async def unsubscribe(self, subscriptions: HexStr) -> bool:
...
@overload
async def unsubscribe(
self,
subscriptions: Sequence[EthSubscription[Any] | HexStr],
) -> bool:
...
async def unsubscribe(
self,
subscriptions: (
EthSubscription[Any] | HexStr | Sequence[EthSubscription[Any] | HexStr]
),
) -> bool:
"""
Used to unsubscribe from one or multiple subscriptions.
:param subscriptions: The subscription(s) to unsubscribe from.
:type subscriptions: Union[EthSubscription, Sequence[EthSubscription], HexStr,
Sequence[HexStr]]
:return: ``True`` if unsubscribing to all was successful, ``False`` otherwise
with a warning.
:rtype: bool
"""
if isinstance(subscriptions, EthSubscription) or isinstance(subscriptions, str):
if isinstance(subscriptions, str):
subscription_id = subscriptions
subscriptions = self.get_by_id(subscription_id)
if subscriptions is None:
raise Web3ValueError(
"Subscription not found or is not being managed by the "
f"subscription manager.\n id: {subscription_id}"
)
if subscriptions not in self.subscriptions:
raise Web3ValueError(
"Subscription not found or is not being managed by the "
"subscription manager.\n "
f"label: {subscriptions.label}\n id: {subscriptions._id}"
)
if await self._w3.eth._unsubscribe(subscriptions.id):
self._remove_subscription(subscriptions)
self.logger.info(
"Successfully unsubscribed from subscription:\n"
" label: %s\n id: %s",
subscriptions.label,
subscriptions.id,
)
if len(self._subscription_container.handler_subscriptions) == 0:
queue = (
self._provider._request_processor._handler_subscription_queue
)
await queue.put(SubscriptionProcessingFinished())
return True
elif isinstance(subscriptions, Sequence):
if len(subscriptions) == 0:
raise Web3ValueError("No subscriptions provided.")
unsubscribed: list[bool] = []
# re-create the subscription list to prevent modifying the original list
# in case ``subscription_manager.subscriptions`` was passed in directly
subs = list(subscriptions)
for sub in subs:
if isinstance(sub, str):
sub = HexStr(sub)
unsubscribed.append(await self.unsubscribe(sub))
return all(unsubscribed)
self.logger.warning(
"Failed to unsubscribe from subscription\n subscription=%s",
subscriptions,
)
return False
async def unsubscribe_all(self) -> bool:
"""
Used to unsubscribe from all subscriptions that are being managed by the
subscription manager.
:return: ``True`` if unsubscribing was successful, ``False`` otherwise.
:rtype: bool
"""
unsubscribed = [
await self.unsubscribe(sub)
# use copy to prevent modifying the list while iterating over it
for sub in self.subscriptions.copy()
]
if all(unsubscribed):
self.logger.info("Successfully unsubscribed from all subscriptions.")
return True
else:
if len(self.subscriptions) > 0:
self.logger.warning(
"Failed to unsubscribe from all subscriptions. Some subscriptions "
"are still active.\n subscriptions=%s",
self.subscriptions,
)
return False
async def handle_subscriptions(self, run_forever: bool = False) -> None:
"""
Used to handle all subscriptions that have handlers. The method will run until
all subscriptions that have handler functions are unsubscribed from or, if
``run_forever`` is set to ``True``, it will run indefinitely.
:param run_forever: If ``True``, the method will run indefinitely.
:type run_forever: bool
:return: None
"""
if not self._subscription_container.handler_subscriptions and not run_forever:
self.logger.warning(
"No handler subscriptions found. Subscription handler did not run."
)
return
queue = self._provider._request_processor._handler_subscription_queue
while run_forever or self._subscription_container.handler_subscriptions:
try:
response = cast(RPCResponse, await queue.get())
formatted_sub_response = cast(
FormattedEthSubscriptionResponse,
await self._w3.manager._process_response(response),
)
# if the subscription was unsubscribed from, the response won't be
# formatted because we lost the request information
sub_id = formatted_sub_response.get("subscription")
sub = self._subscription_container.get_handler_subscription_by_id(
sub_id
)
if sub:
sub_context = EthSubscriptionContext(
self._w3,
sub,
formatted_sub_response["result"],
**sub._handler_context,
)
if sub.parallelize is True or (
sub.parallelize is None and self.parallelize
):
# run the handler in a task to allow parallel processing
task = asyncio.create_task(sub._handler(sub_context))
self._tasks.add(task)
task.add_done_callback(self._handler_task_callback)
else:
# await the handler in the main loop to ensure order
await sub._handler(sub_context)
except SubscriptionProcessingFinished:
if not run_forever:
self.logger.info(
"All handler subscriptions have been unsubscribed from. "
"Stopping subscription handling."
)
break
except SubscriptionHandlerTaskException:
self.logger.error(
"An exception occurred in a subscription handler task. "
"Stopping subscription handling."
)
await self._cleanup_remaining_tasks()
raise
except TaskNotRunning as e:
self.logger.error("Stopping subscription handling: %s", e.message)
self._provider._handle_listener_task_exceptions()
break
# no active handler subscriptions, clear the handler subscription queue
self._provider._request_processor._reset_handler_subscription_queue()
if self._tasks:
await self._cleanup_remaining_tasks()
| SubscriptionManager |
python | doocs__leetcode | solution/3200-3299/3216.Lexicographically Smallest String After a Swap/Solution.py | {
"start": 0,
"end": 243
} | class ____:
def getSmallestString(self, s: str) -> str:
for i, (a, b) in enumerate(pairwise(map(ord, s))):
if (a + b) % 2 == 0 and a > b:
return s[:i] + s[i + 1] + s[i] + s[i + 2 :]
return s
| Solution |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/base_azure.py | {
"start": 1486,
"end": 8752
} | class ____(BaseHook):
"""
This hook acts as a base hook for azure services.
It offers several authentication mechanisms to authenticate
the client library used for upstream azure hooks.
:param sdk_client: The SDKClient to use.
:param conn_id: The :ref:`Azure connection id<howto/connection:azure>`
which refers to the information to connect to the service.
"""
conn_name_attr = "conn_id"
default_conn_name = "azure_default"
conn_type = "azure"
hook_name = "Azure"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"tenantId": StringField(lazy_gettext("Azure Tenant ID"), widget=BS3TextFieldWidget()),
"subscriptionId": StringField(lazy_gettext("Azure Subscription ID"), widget=BS3TextFieldWidget()),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
import json
return {
"hidden_fields": ["schema", "port", "host"],
"relabeling": {
"login": "Azure Client ID",
"password": "Azure Secret",
},
"placeholders": {
"extra": json.dumps(
{
"key_path": "path to json file for auth",
"key_json": "specifies json dict for auth",
},
indent=1,
),
"login": "client_id (token credentials auth)",
"password": "secret (token credentials auth)",
"tenantId": "tenantId (token credentials auth)",
"subscriptionId": "subscriptionId (token credentials auth)",
},
}
def __init__(self, sdk_client: Any = None, conn_id: str = "azure_default"):
self.sdk_client = sdk_client
self.conn_id = conn_id
super().__init__()
def get_conn(self) -> Any:
"""
Authenticate the resource using the connection id passed during init.
:return: the authenticated client.
"""
if not self.sdk_client:
raise ValueError("`sdk_client` must be provided to AzureBaseHook to use `get_conn` method.")
conn = self.get_connection(self.conn_id)
subscription_id = conn.extra_dejson.get("subscriptionId")
key_path = conn.extra_dejson.get("key_path")
if key_path:
if not key_path.endswith(".json"):
raise AirflowException("Unrecognised extension for key file.")
self.log.info("Getting connection using a JSON key file.")
return get_client_from_auth_file(client_class=self.sdk_client, auth_path=key_path)
key_json = conn.extra_dejson.get("key_json")
if key_json:
self.log.info("Getting connection using a JSON config.")
return get_client_from_json_dict(client_class=self.sdk_client, config_dict=key_json)
credentials = self.get_credential(conn=conn)
return self.sdk_client(
credentials=credentials,
subscription_id=subscription_id,
)
def get_credential(
self, *, conn: Connection | None = None
) -> (
ServicePrincipalCredentials
| AzureIdentityCredentialAdapter
| ClientSecretCredential
| DefaultAzureCredential
):
"""
Get Azure credential object for the connection.
Azure Identity based credential object (``ClientSecretCredential``, ``DefaultAzureCredential``) can be used to get OAuth token using ``get_token`` method.
Older Credential objects (``ServicePrincipalCredentials``, ``AzureIdentityCredentialAdapter``) are supported for backward compatibility.
:return: The Azure credential object
"""
if not conn:
conn = self.get_connection(self.conn_id)
tenant = conn.extra_dejson.get("tenantId")
credential: (
ServicePrincipalCredentials
| AzureIdentityCredentialAdapter
| ClientSecretCredential
| DefaultAzureCredential
)
if all([conn.login, conn.password, tenant]):
credential = self._get_client_secret_credential(conn)
else:
credential = self._get_default_azure_credential(conn)
return credential
def _get_client_secret_credential(
self, conn: Connection
) -> ServicePrincipalCredentials | ClientSecretCredential:
self.log.info("Getting credentials using specific credentials and subscription_id.")
extra_dejson = conn.extra_dejson
tenant = extra_dejson.get("tenantId")
use_azure_identity_object = extra_dejson.get("use_azure_identity_object", False)
if use_azure_identity_object:
return ClientSecretCredential(
client_id=conn.login, # type: ignore[arg-type]
client_secret=conn.password, # type: ignore[arg-type]
tenant_id=tenant, # type: ignore[arg-type]
)
return ServicePrincipalCredentials(client_id=conn.login, secret=conn.password, tenant=tenant)
def _get_default_azure_credential(
self, conn: Connection
) -> DefaultAzureCredential | AzureIdentityCredentialAdapter:
self.log.info("Using DefaultAzureCredential as credential")
extra_dejson = conn.extra_dejson
managed_identity_client_id = extra_dejson.get("managed_identity_client_id")
workload_identity_tenant_id = extra_dejson.get("workload_identity_tenant_id")
use_azure_identity_object = extra_dejson.get("use_azure_identity_object", False)
if use_azure_identity_object:
return get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
return AzureIdentityCredentialAdapter(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
def get_token(self, *scopes, **kwargs) -> AccessToken:
"""
Request an access token for `scopes`.
To use this method, set `use_azure_identity_object: True` in the connection extra field.
ServicePrincipalCredentials and AzureIdentityCredentialAdapter don't support `get_token` method.
"""
credential = self.get_credential()
if isinstance(credential, ServicePrincipalCredentials) or isinstance(
credential, AzureIdentityCredentialAdapter
):
raise AttributeError(
"ServicePrincipalCredentials and AzureIdentityCredentialAdapter don't support get_token method. "
"Please set `use_azure_identity_object: True` in the connection extra field to use credential that support get_token method."
)
return credential.get_token(*scopes, **kwargs)
| AzureBaseHook |
python | tensorflow__tensorflow | tensorflow/cc/saved_model/testdata/generate_saved_models.py | {
"start": 2751,
"end": 4756
} | class ____(module.Module):
"""A module with an Asset, StaticHashTable, and a lookup function."""
def __init__(self):
self.asset = asset.Asset(
test.test_src_dir_path(
"cc/saved_model/testdata/static_hashtable_asset.txt"))
self.table = lookup_ops.StaticHashTable(
lookup_ops.TextFileInitializer(self.asset, dtypes.string,
lookup_ops.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup_ops.TextFileIndex.LINE_NUMBER),
-1)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(shape=None, dtype=dtypes.string)])
def lookup(self, word):
return self.table.lookup(word)
def get_simple_session():
ops.disable_eager_execution()
sess = session_lib.Session()
variables.Variable(1.)
sess.run(variables.global_variables_initializer())
return sess
MODULE_CTORS = {
"VarsAndArithmeticObjectGraph": (VarsAndArithmeticObjectGraph, 2),
"CyclicModule": (CyclicModule, 2),
"AssetModule": (AssetModule, 2),
"StaticHashTableModule": (StaticHashTableModule, 2),
"SimpleV1Model": (get_simple_session, 1),
}
def main(args):
if len(args) != 3:
print("Expected: {export_path} {ModuleName}")
print("Allowed ModuleNames:", MODULE_CTORS.keys())
return 1
_, export_path, module_name = args
module_ctor, version = MODULE_CTORS.get(module_name)
if not module_ctor:
print("Expected ModuleName to be one of:", MODULE_CTORS.keys())
return 2
os.makedirs(export_path)
tf_module = module_ctor()
if version == 2:
options = save_options.SaveOptions(save_debug_info=True)
saved_model.save(tf_module, export_path, options=options)
else:
builder = saved_model.builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(tf_module, ["serve"])
builder.save()
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
app.run(main)
| StaticHashTableModule |
python | lazyprogrammer__machine_learning_examples | ab_testing/chisquare.py | {
"start": 1052,
"end": 1973
} | class ____:
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def next(self):
click1 = 1 if (np.random.random() < self.p1) else 0
click2 = 1 if (np.random.random() < self.p2) else 0
return click1, click2
def get_p_value(T):
# same as scipy.stats.chi2_contingency(T, correction=False)
det = T[0,0]*T[1,1] - T[0,1]*T[1,0]
c2 = float(det) / T[0].sum() * det / T[1].sum() * T.sum() / T[:,0].sum() / T[:,1].sum()
p = 1 - chi2.cdf(x=c2, df=1)
return p
def run_experiment(p1, p2, N):
data = DataGenerator(p1, p2)
p_values = np.empty(N)
T = np.zeros((2, 2)).astype(np.float32)
for i in range(N):
c1, c2 = data.next()
T[0,c1] += 1
T[1,c2] += 1
# ignore the first 10 values
if i < 10:
p_values[i] = None
else:
p_values[i] = get_p_value(T)
plt.plot(p_values)
plt.plot(np.ones(N)*0.05)
plt.show()
run_experiment(0.1, 0.11, 20000)
| DataGenerator |
python | python-openxml__python-docx | src/docx/shape.py | {
"start": 474,
"end": 1368
} | class ____(Parented):
"""Sequence of |InlineShape| instances, supporting len(), iteration, and indexed access."""
def __init__(self, body_elm: CT_Body, parent: StoryPart):
super(InlineShapes, self).__init__(parent)
self._body = body_elm
def __getitem__(self, idx: int):
"""Provide indexed access, e.g. 'inline_shapes[idx]'."""
try:
inline = self._inline_lst[idx]
except IndexError:
msg = "inline shape index [%d] out of range" % idx
raise IndexError(msg)
return InlineShape(inline)
def __iter__(self):
return (InlineShape(inline) for inline in self._inline_lst)
def __len__(self):
return len(self._inline_lst)
@property
def _inline_lst(self):
body = self._body
xpath = "//w:p/w:r/w:drawing/wp:inline"
return body.xpath(xpath)
| InlineShapes |
python | apache__airflow | airflow-core/src/airflow/utils/log/file_task_handler.py | {
"start": 4284,
"end": 4907
} | class ____(TypedDict):
"""Metadata about the log fetching process, including `end_of_log` and `log_pos`."""
end_of_log: bool
log_pos: NotRequired[int]
# the following attributes are used for Elasticsearch and OpenSearch log handlers
offset: NotRequired[str | int]
# Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
# on the client. Sending as a string prevents this issue.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
last_log_timestamp: NotRequired[str]
max_offset: NotRequired[str]
| LogMetadata |
python | pola-rs__polars | py-polars/tests/unit/io/database/test_read.py | {
"start": 2802,
"end": 3598
} | class ____:
"""Mock resultset class for databases we can't test in CI."""
def __init__(
self,
test_data: pa.Table,
batched: bool,
exact_batch_size: bool,
repeat_batch_calls: bool = False,
) -> None:
self.test_data = test_data
self.repeat_batched_calls = repeat_batch_calls
self.exact_batch_size = exact_batch_size
self.batched = batched
self.n_calls = 1
def __call__(self, *args: Any, **kwargs: Any) -> Any:
if not self.exact_batch_size:
assert len(args) == 0
if self.repeat_batched_calls:
res = self.test_data[: None if self.n_calls else 0]
self.n_calls -= 1
else:
res = iter((self.test_data,))
return res
| MockResultSet |
python | realpython__materials | python-contact-book/source_code_final/rpcontacts/views.py | {
"start": 389,
"end": 2921
} | class ____(QMainWindow):
"""Main Window."""
def __init__(self, parent=None):
"""Initializer."""
super().__init__(parent)
self.setWindowTitle("RP Contacts")
self.resize(550, 250)
self.centralWidget = QWidget()
self.setCentralWidget(self.centralWidget)
self.layout = QHBoxLayout()
self.centralWidget.setLayout(self.layout)
self.contactsModel = ContactsModel()
self.setupUI()
def setupUI(self):
"""Setup the main window's GUI."""
# Create the table view widget
self.table = QTableView()
self.table.setModel(self.contactsModel.model)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.resizeColumnsToContents()
# Create buttons
self.addButton = QPushButton("Add...")
self.addButton.clicked.connect(self.openAddDialog)
self.deleteButton = QPushButton("Delete")
self.deleteButton.clicked.connect(self.deleteContact)
self.clearAllButton = QPushButton("Clear All")
self.clearAllButton.clicked.connect(self.clearContacts)
# Lay out the GUI
layout = QVBoxLayout()
layout.addWidget(self.addButton)
layout.addWidget(self.deleteButton)
layout.addStretch()
layout.addWidget(self.clearAllButton)
self.layout.addWidget(self.table)
self.layout.addLayout(layout)
def openAddDialog(self):
"""Open the Add Contact dialog."""
dialog = AddDialog(self)
if dialog.exec() == QDialog.Accepted:
self.contactsModel.addContact(dialog.data)
self.table.resizeColumnsToContents()
def deleteContact(self):
"""Delete the selected contact from the database."""
row = self.table.currentIndex().row()
if row < 0:
return
messageBox = QMessageBox.warning(
self,
"Warning!",
"Do you want to remove the selected contact?",
QMessageBox.Ok | QMessageBox.Cancel,
)
if messageBox == QMessageBox.Ok:
self.contactsModel.deleteContact(row)
def clearContacts(self):
"""Remove all contacts from the database."""
messageBox = QMessageBox.warning(
self,
"Warning!",
"Do you want to remove all your contacts?",
QMessageBox.Ok | QMessageBox.Cancel,
)
if messageBox == QMessageBox.Ok:
self.contactsModel.clearContacts()
| Window |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 40749,
"end": 44190
} | class ____:
@pytest.fixture(autouse=True)
def _disable_test_mode(self, monkeypatch):
monkeypatch.setattr(ExecutorSafeguard, "test_mode", False)
def test_execute_not_allow_nested_ops(self):
with DAG("d1"):
op = ExtendedHelloWorldOperator(task_id="hello_operator", allow_nested_operators=False)
with pytest.raises(RuntimeError):
op.execute(context={})
def test_execute_subclassed_op_warns_once(self, captured_logs):
with DAG("d1"):
op = ExtendedHelloWorldOperator(task_id="hello_operator")
op.execute(context={})
assert captured_logs == [
{
"event": "ExtendedHelloWorldOperator.execute cannot be called outside of the Task Runner!",
"level": "warning",
"timestamp": mock.ANY,
"logger": "tests.task_sdk.bases.test_operator",
"loc": mock.ANY,
},
]
def test_decorated_operators(self, caplog):
with DAG("d1") as dag:
@dag.task(task_id="task_id", dag=dag)
def say_hello(**context):
operator = HelloWorldOperator(task_id="hello_operator")
return operator.execute(context=context)
op = say_hello()
op.operator.execute(context={})
assert {
"event": "HelloWorldOperator.execute cannot be called outside of the Task Runner!",
"log_level": "warning",
} in caplog
@pytest.mark.log_level(logging.WARNING)
def test_python_op(self, caplog):
from airflow.providers.standard.operators.python import PythonOperator
with DAG("d1"):
def say_hello(**context):
operator = HelloWorldOperator(task_id="hello_operator")
return operator.execute(context=context)
op = PythonOperator(
task_id="say_hello",
python_callable=say_hello,
)
op.execute(context={}, PythonOperator__sentinel=ExecutorSafeguard.sentinel_value)
assert {
"event": "HelloWorldOperator.execute cannot be called outside of the Task Runner!",
"log_level": "warning",
} in caplog
def test_partial_default_args():
class MockOperator(BaseOperator):
def __init__(self, arg1, arg2, arg3, **kwargs):
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.kwargs = kwargs
super().__init__(**kwargs)
with DAG(
dag_id="test_partial_default_args",
default_args={"queue": "THIS", "arg1": 1, "arg2": 2, "arg3": 3, "arg4": 4},
):
t1 = BaseOperator(task_id="t1")
t2 = MockOperator.partial(task_id="t2", arg2="b").expand(arg1=t1.output)
# Only default_args recognized by BaseOperator are applied.
assert t2.partial_kwargs["queue"] == "THIS"
assert "arg1" not in t2.partial_kwargs
assert t2.partial_kwargs["arg2"] == "b"
assert "arg3" not in t2.partial_kwargs
assert "arg4" not in t2.partial_kwargs
# Simulate resolving mapped operator. This should apply all default_args.
op = t2.unmap({"arg1": "a"})
assert isinstance(op, MockOperator)
assert "arg4" not in op.kwargs # Not recognized by any class; never passed.
assert op.arg1 == "a"
assert op.arg2 == "b"
assert op.arg3 == 3
assert op.queue == "THIS"
| TestExecutorSafeguard |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cloud/clients.py | {
"start": 7125,
"end": 8545
} | class ____:
"""
Client for interacting with the dbt cloud Administrative API.
Args:
api_key: API key to authenticate with the dbt Cloud administrative API.
domain: Domain at which the dbt Cloud API is hosted.
"""
def __init__(self, api_key: str, domain: str = "metadata.cloud.getdbt.com"):
self._http_endpoint = HTTPEndpoint(
base_headers={
"Authorization": f"Bearer {api_key}",
"user-agent": f"prefect-{prefect.__version__}",
"x-dbt-partner-source": "prefect",
"content-type": "application/json",
},
url=f"https://{domain}/graphql",
)
def query(
self,
query: str,
variables: Optional[Dict] = None,
operation_name: Optional[str] = None,
) -> Dict[str, Any]:
"""
Run a GraphQL query against the dbt Cloud metadata API.
Args:
query: The GraphQL query to run.
variables: The values of any variables defined in the GraphQL query.
operation_name: The name of the operation to run if multiple operations
are defined in the provided query.
Returns:
The result of the GraphQL query.
"""
return self._http_endpoint(
query=query, variables=variables, operation_name=operation_name
)
| DbtCloudMetadataClient |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-deepset/destination_deepset/models.py | {
"start": 437,
"end": 641
} | class ____(StrEnum):
"""Available stream formats for Airbyte's source connectors"""
AVRO = "avro"
CSV = "csv"
JSONL = "jsonl"
PARQUET = "parquet"
DOCUMENT = "unstructured"
| Filetypes |
python | doocs__leetcode | solution/2900-2999/2983.Palindrome Rearrangement Queries/Solution.py | {
"start": 0,
"end": 1984
} | class ____:
def canMakePalindromeQueries(self, s: str, queries: List[List[int]]) -> List[bool]:
def count(pre: List[List[int]], i: int, j: int) -> List[int]:
return [x - y for x, y in zip(pre[j + 1], pre[i])]
def sub(cnt1: List[int], cnt2: List[int]) -> List[int]:
res = []
for x, y in zip(cnt1, cnt2):
if x - y < 0:
return []
res.append(x - y)
return res
def check(
pre1: List[List[int]], pre2: List[List[int]], a: int, b: int, c: int, d: int
) -> bool:
if diff[a] > 0 or diff[m] - diff[max(b, d) + 1] > 0:
return False
if d <= b:
return count(pre1, a, b) == count(pre2, a, b)
if b < c:
return (
diff[c] - diff[b + 1] == 0
and count(pre1, a, b) == count(pre2, a, b)
and count(pre1, c, d) == count(pre2, c, d)
)
cnt1 = sub(count(pre1, a, b), count(pre2, a, c - 1))
cnt2 = sub(count(pre2, c, d), count(pre1, b + 1, d))
return bool(cnt1) and bool(cnt2) and cnt1 == cnt2
n = len(s)
m = n // 2
t = s[m:][::-1]
s = s[:m]
pre1 = [[0] * 26 for _ in range(m + 1)]
pre2 = [[0] * 26 for _ in range(m + 1)]
diff = [0] * (m + 1)
for i, (c1, c2) in enumerate(zip(s, t), 1):
pre1[i] = pre1[i - 1][:]
pre2[i] = pre2[i - 1][:]
pre1[i][ord(c1) - ord("a")] += 1
pre2[i][ord(c2) - ord("a")] += 1
diff[i] = diff[i - 1] + int(c1 != c2)
ans = []
for a, b, c, d in queries:
c, d = n - 1 - d, n - 1 - c
ok = (
check(pre1, pre2, a, b, c, d)
if a <= c
else check(pre2, pre1, c, d, a, b)
)
ans.append(ok)
return ans
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 109970,
"end": 121187
} | class ____(fixtures.MappedTest, testing.AssertsCompiledSQL):
__dialect__ = "default"
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", Integer, ForeignKey("a.id")),
Column("value", String(10)),
)
Table(
"c1",
metadata,
Column("id", Integer, primary_key=True),
Column("b_id", Integer, ForeignKey("b.id")),
Column("value", String(10)),
)
Table(
"c2",
metadata,
Column("id", Integer, primary_key=True),
Column("b_id", Integer, ForeignKey("b.id")),
Column("value", String(10)),
)
Table(
"d1",
metadata,
Column("id", Integer, primary_key=True),
Column("c1_id", Integer, ForeignKey("c1.id")),
Column("value", String(10)),
)
Table(
"d2",
metadata,
Column("id", Integer, primary_key=True),
Column("c2_id", Integer, ForeignKey("c2.id")),
Column("value", String(10)),
)
Table(
"e1",
metadata,
Column("id", Integer, primary_key=True),
Column("d1_id", Integer, ForeignKey("d1.id")),
Column("value", String(10)),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C1(cls.Comparable):
pass
class C2(cls.Comparable):
pass
class D1(cls.Comparable):
pass
class D2(cls.Comparable):
pass
class E1(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
A, B, C1, C2, D1, D2, E1 = (
cls.classes.A,
cls.classes.B,
cls.classes.C1,
cls.classes.C2,
cls.classes.D1,
cls.classes.D2,
cls.classes.E1,
)
cls.mapper_registry.map_imperatively(
A, cls.tables.a, properties={"bs": relationship(B)}
)
cls.mapper_registry.map_imperatively(
B,
cls.tables.b,
properties=odict(
[
("c1s", relationship(C1, order_by=cls.tables.c1.c.id)),
("c2s", relationship(C2, order_by=cls.tables.c2.c.id)),
]
),
)
cls.mapper_registry.map_imperatively(
C1,
cls.tables.c1,
properties={"d1s": relationship(D1, order_by=cls.tables.d1.c.id)},
)
cls.mapper_registry.map_imperatively(
C2,
cls.tables.c2,
properties={"d2s": relationship(D2, order_by=cls.tables.d2.c.id)},
)
cls.mapper_registry.map_imperatively(
D1,
cls.tables.d1,
properties={"e1s": relationship(E1, order_by=cls.tables.e1.c.id)},
)
cls.mapper_registry.map_imperatively(D2, cls.tables.d2)
cls.mapper_registry.map_imperatively(E1, cls.tables.e1)
@classmethod
def _fixture_data(cls):
A, B, C1, C2, D1, D2, E1 = (
cls.classes.A,
cls.classes.B,
cls.classes.C1,
cls.classes.C2,
cls.classes.D1,
cls.classes.D2,
cls.classes.E1,
)
return [
A(
id=1,
bs=[
B(
id=1,
c1s=[
C1(
id=1,
value="C11",
d1s=[
D1(id=1, e1s=[E1(id=1)]),
D1(id=2, e1s=[E1(id=2)]),
],
)
],
c2s=[
C2(id=1, value="C21", d2s=[D2(id=3)]),
C2(id=2, value="C22", d2s=[D2(id=4)]),
],
),
B(
id=2,
c1s=[
C1(
id=4,
value="C14",
d1s=[
D1(
id=3,
e1s=[
E1(id=3, value="E13"),
E1(id=4, value="E14"),
],
),
D1(id=4, e1s=[E1(id=5)]),
],
)
],
c2s=[C2(id=4, value="C24", d2s=[])],
),
],
),
A(
id=2,
bs=[
B(
id=3,
c1s=[
C1(
id=8,
d1s=[D1(id=5, value="D15", e1s=[E1(id=6)])],
)
],
c2s=[C2(id=8, d2s=[D2(id=6, value="D26")])],
)
],
),
]
@classmethod
def insert_data(cls, connection):
s = Session(connection)
s.add_all(cls._fixture_data())
s.commit()
def _assert_result(self, query):
eq_(query.all(), self._fixture_data())
def test_nested_innerjoin_propagation_multiple_paths_one(self):
A, B, C1, C2 = (
self.classes.A,
self.classes.B,
self.classes.C1,
self.classes.C2,
)
s = fixture_session()
q = s.query(A).options(
joinedload(A.bs, innerjoin=False)
.joinedload(B.c1s, innerjoin=True)
.joinedload(C1.d1s, innerjoin=True),
defaultload(A.bs)
.joinedload(B.c2s, innerjoin=True)
.joinedload(C2.d2s, innerjoin=False),
)
self.assert_compile(
q,
"SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
"d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
"c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
"c1_1.value AS c1_1_value, d2_1.id AS d2_1_id, "
"d2_1.c2_id AS d2_1_c2_id, d2_1.value AS d2_1_value, "
"c2_1.id AS c2_1_id, c2_1.b_id AS c2_1_b_id, "
"c2_1.value AS c2_1_value, b_1.id AS b_1_id, "
"b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
"FROM a "
"LEFT OUTER JOIN "
"(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
"JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id "
"JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id) ON a.id = b_1.a_id "
"LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
"ORDER BY c1_1.id, d1_1.id, c2_1.id, d2_1.id",
)
self._assert_result(q)
def test_nested_innerjoin_propagation_multiple_paths_two(self):
# test #3447
A, B, C1 = (self.classes.A, self.classes.B, self.classes.C1)
s = fixture_session()
q = s.query(A).options(
joinedload(A.bs),
joinedload(A.bs, B.c2s, innerjoin=True),
joinedload(A.bs, B.c1s, innerjoin=True),
joinedload(A.bs, B.c1s, C1.d1s),
)
self.assert_compile(
q,
"SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
"d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
"c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
"c1_1.value AS c1_1_value, c2_1.id AS c2_1_id, "
"c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
"b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, "
"b_1.value AS b_1_value "
"FROM a LEFT OUTER JOIN "
"(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
"JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
"LEFT OUTER JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id "
"ORDER BY c1_1.id, d1_1.id, c2_1.id",
)
self._assert_result(q)
def test_multiple_splice_points(self):
A, B, C1, C2, D1 = (
self.classes.A,
self.classes.B,
self.classes.C1,
self.classes.C2,
self.classes.D1,
)
s = fixture_session()
q = s.query(A).options(
joinedload(A.bs, innerjoin=False),
joinedload(A.bs, B.c1s, innerjoin=True),
joinedload(A.bs, B.c2s, innerjoin=True),
joinedload(A.bs, B.c1s, C1.d1s, innerjoin=False),
joinedload(A.bs, B.c2s, C2.d2s),
joinedload(A.bs, B.c1s, C1.d1s, D1.e1s, innerjoin=True),
)
self.assert_compile(
q,
"SELECT a.id AS a_id, e1_1.id AS e1_1_id, "
"e1_1.d1_id AS e1_1_d1_id, e1_1.value AS e1_1_value, "
"d1_1.id AS d1_1_id, d1_1.c1_id AS d1_1_c1_id, "
"d1_1.value AS d1_1_value, c1_1.id AS c1_1_id, "
"c1_1.b_id AS c1_1_b_id, c1_1.value AS c1_1_value, "
"d2_1.id AS d2_1_id, d2_1.c2_id AS d2_1_c2_id, "
"d2_1.value AS d2_1_value, c2_1.id AS c2_1_id, "
"c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
"b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
"FROM a LEFT OUTER JOIN "
"(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
"JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
"LEFT OUTER JOIN ("
"d1 AS d1_1 JOIN e1 AS e1_1 ON d1_1.id = e1_1.d1_id) "
"ON c1_1.id = d1_1.c1_id "
"LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
"ORDER BY c1_1.id, d1_1.id, e1_1.id, c2_1.id, d2_1.id",
)
self._assert_result(q)
def test_splice_onto_ac(self):
A = self.classes.A
B = self.classes.B
b_table = self.tables.b
c1_table = self.tables.c1
from sqlalchemy import inspect
weird_selectable = b_table.outerjoin(c1_table)
b_np = aliased(B, weird_selectable, flat=True)
a_mapper = inspect(A)
a_mapper.add_property("bs_np", relationship(b_np, viewonly=True))
s = fixture_session()
q = s.query(A).options(joinedload(A.bs_np, innerjoin=False))
self.assert_compile(
q,
"SELECT a.id AS a_id, b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, "
"b_1.value AS b_1_value FROM a LEFT OUTER JOIN "
"(b AS b_1 LEFT OUTER JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) "
"ON a.id = b_1.a_id",
)
| InnerJoinSplicingTest |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/singleton.py | {
"start": 93,
"end": 944
} | class ____:
"""
A base class for implementing the Singleton pattern.
This class stores instances and initialization flags for each subclass in dictionaries.
This allows each subclass to have its own unique instance and control over its initialization process.
The __new__ method ensures that only one instance of each subclass is created.
The _initialized dictionary is used to control when the initialization logic of each subclass is executed.
"""
_instances: dict[Type["Singleton"], Any] = {}
_initialized: dict[Type["Singleton"], bool] = {}
def __new__(cls: Type["Singleton"], *args: Any, **kwargs: Any) -> Any: # noqa: ANN401
if cls not in cls._instances:
cls._instances[cls] = super().__new__(cls)
cls._initialized[cls] = False
return cls._instances[cls]
| Singleton |
python | PrefectHQ__prefect | src/prefect/settings/sources.py | {
"start": 3997,
"end": 8423
} | class ____(PydanticBaseSettingsSource):
"""
Custom pydantic settings source to load profile settings from a toml file.
See https://docs.pydantic.dev/latest/concepts/pydantic_settings/#customise-settings-sources
"""
def __init__(self, settings_cls: Type[BaseSettings]):
super().__init__(settings_cls)
self.settings_cls = settings_cls
self.profiles_path: Path = _get_profiles_path()
self.profile_settings: dict[str, Any] = self._load_profile_settings()
def _load_profile_settings(self) -> Dict[str, Any]:
"""Helper method to load the profile settings from the profiles.toml file"""
if not self.profiles_path.exists():
return self._get_default_profile()
try:
all_profile_data = _read_toml_file(self.profiles_path)
except toml.TomlDecodeError:
warnings.warn(
f"Failed to load profiles from {self.profiles_path}. Please ensure the file is valid TOML."
)
return {}
if (
sys.argv[0].endswith("/prefect")
and len(sys.argv) >= 3
and sys.argv[1] == "--profile"
):
active_profile = sys.argv[2]
else:
active_profile = os.environ.get("PREFECT_PROFILE") or all_profile_data.get(
"active"
)
profiles_data = all_profile_data.get("profiles", {})
if not active_profile or active_profile not in profiles_data:
return self._get_default_profile()
return profiles_data[active_profile]
def _get_default_profile(self) -> Dict[str, Any]:
"""Helper method to get the default profile"""
default_profile_data = _read_toml_file(DEFAULT_PROFILES_PATH)
default_profile = default_profile_data.get("active", "ephemeral")
assert isinstance(default_profile, str)
return default_profile_data.get("profiles", {}).get(default_profile, {})
def get_field_value(
self, field: FieldInfo, field_name: str
) -> Tuple[Any, str, bool]:
"""Concrete implementation to get the field value from the profile settings"""
if field.validation_alias:
# Use validation alias as the key to ensure profile value does not
# higher priority sources. Lower priority sources that use the
# field name can override higher priority sources that use the
# validation alias as seen in https://github.com/PrefectHQ/prefect/issues/15981
if isinstance(field.validation_alias, str):
value = self.profile_settings.get(field.validation_alias.upper())
if value is not None:
return value, field.validation_alias, self.field_is_complex(field)
elif isinstance(field.validation_alias, AliasChoices):
value = None
lowest_priority_alias = next(
choice
for choice in reversed(field.validation_alias.choices)
if isinstance(choice, str)
)
for alias in field.validation_alias.choices:
if not isinstance(alias, str):
continue
value = self.profile_settings.get(alias.upper())
if value is not None:
return (
value,
lowest_priority_alias,
self.field_is_complex(field),
)
name = f"{self.config.get('env_prefix', '')}{field_name.upper()}"
value = self.profile_settings.get(name)
return value, field_name, self.field_is_complex(field)
def __call__(self) -> Dict[str, Any]:
"""Called by pydantic to get the settings from our custom source"""
if _is_test_mode():
return {}
profile_settings: Dict[str, Any] = {}
for field_name, field in self.settings_cls.model_fields.items():
value, key, is_complex = self.get_field_value(field, field_name)
if value is not None:
prepared_value = self.prepare_field_value(
field_name, field, value, is_complex
)
profile_settings[key] = prepared_value
return profile_settings
DEFAULT_PREFECT_TOML_PATH = Path("prefect.toml")
| ProfileSettingsTomlLoader |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/confpage.py | {
"start": 513,
"end": 17926
} | class ____(PluginConfigPage):
def __init__(self, plugin, parent):
super().__init__(plugin, parent)
self.buffer_spin = None
self.apply_callback = self.warn_if_large_buffer
def setup_page(self):
newcb = self.create_checkbox
# Display group
display_group = QGroupBox(_("Display"))
banner_box = newcb(
_("Show welcome message"),
'show_banner',
tip=_("Print the startup message when opening a new console"),
)
calltips_box = newcb(
_("Show calltips"),
'show_calltips',
tip=_("Show a summary help popup when typing an open parenthesis "
"after a callable function"),
)
show_time_box = newcb(
_("Show elapsed time"),
'show_elapsed_time',
tip=_("Display the time since the current console was started "
"in the tab bar"),
)
display_layout = QVBoxLayout()
display_layout.addWidget(banner_box)
display_layout.addWidget(calltips_box)
display_layout.addWidget(show_time_box)
display_group.setLayout(display_layout)
# Confirmation group
confirmations_group = QGroupBox(_("Confirmation"))
ask_box = newcb(
_("Ask for confirmation before closing"),
'ask_before_closing',
)
reset_namespace_box = newcb(
_("Ask for confirmation before removing all variables"),
'show_reset_namespace_warning',
)
ask_restart_box = newcb(
_("Ask for confirmation before restarting"),
'ask_before_restart',
)
confirmations_layout = QVBoxLayout()
confirmations_layout.addWidget(ask_box)
confirmations_layout.addWidget(ask_restart_box)
confirmations_layout.addWidget(reset_namespace_box)
confirmations_group.setLayout(confirmations_layout)
# Completion group
comp_group = QGroupBox(_("Completion"))
completers = [(_("Graphical"), 0), (_("Terminal"), 1), (_("Plain"), 2)]
comp_box = self.create_combobox(
_("Display:"),
completers,
'completion_type',
tip=_(
"Graphical shows a list of completion matches in a GUI.\n"
"Plain displays matches in the Console output, like Bash.\n"
"Terminal is Plain plus Tab selecting matches, like Zsh.\n"
),
)
jedi_box = newcb(
_("Use Jedi completion"),
"jedi_completer",
tip=_(
"Enable Jedi-based tab-completion in the IPython console.\n"
"Similar to the greedy completer, but without evaluating "
"the code and allows completion of dictionary keys, "
"nested lists and similar.\n"
"Warning: Can slow down the Console when working with "
"large dataframes."
),
)
greedy_box = newcb(
_("Use greedy completion"),
"greedy_completer",
tip=_(
"Enable <tt>Tab</tt> completion on elements of lists, "
"results of function calls and similar "
"<i>without</i> assigning them to a variable, "
"like <tt>li[0].<Tab></tt> or "
"<tt>ins.meth().<Tab></tt><br>"
"<b>Warning</b>: This can be unsafe because your code "
"is actually executed when you press <tt>Tab</tt>."
),
)
comp_layout = QVBoxLayout()
comp_layout.addWidget(comp_box)
comp_layout.addWidget(jedi_box)
comp_layout.addWidget(greedy_box)
comp_group.setLayout(comp_layout)
# Output group
output_group = QGroupBox(_("Output"))
self.buffer_spin = self.create_spinbox(
_("Buffer:"),
_(" lines"),
'buffer_size',
min_=100,
# >10k can make Spyder slow, see spyder-ide/spyder#19091
max_=50_000,
step=100,
tip=_(
"The maximum number of output lines "
"retained in each console at a time.\n"
"Warning; Buffer sizes greater than 10000 lines can slow "
"down Spyder."
),
)
sympy_box = newcb(
_("Render SymPy symbolic math"),
"symbolic_math",
tip=_(
"Pretty-print the outputs of SymPy symbolic computations\n"
"(requires SymPy installed in the console environment).\n"
"Refer to SymPy's documentation for details on using it."
),
)
output_layout = QVBoxLayout()
output_layout.addWidget(self.buffer_spin)
output_layout.addWidget(sympy_box)
output_group.setLayout(output_layout)
# --- Plotting ---
# Matplotlib group
matplotlib_group = QGroupBox(_("Matplotlib support"))
matplotlib_box = newcb(_("Activate support"), 'pylab')
autoload_matplotlib_box = newcb(
_("Automatically import NumPy and Matplotlib modules"),
'pylab/autoload',
tip=_(
"This is a convinience to use NumPy and Matplotlib\n"
"in the console without explicitly importing the modules."
)
)
autoload_matplotlib_box.setEnabled(self.get_option('pylab'))
matplotlib_box.checkbox.toggled.connect(
autoload_matplotlib_box.setEnabled
)
matplotlib_layout = QVBoxLayout()
matplotlib_layout.addWidget(matplotlib_box)
matplotlib_layout.addWidget(autoload_matplotlib_box)
matplotlib_group.setLayout(matplotlib_layout)
# Graphics backend group
inline = _("Inline")
automatic = _("Automatic")
backend_group = QGroupBox(_("Graphics backend"))
backend_label = QLabel(_("Choose how figures are displayed"))
backends = [
(inline, 'inline'),
(automatic, 'auto'),
("Qt", 'qt'),
("Tk", 'tk'),
]
if sys.platform == 'darwin':
backends.append(("macOS", 'osx'))
backends = tuple(backends)
backend_box = self.create_combobox(
_("Backend:"),
backends,
'pylab/backend',
default='inline',
tip=_(
"If unsure, select {inline} to show figures in the Plots pane"
"\nor {auto} to interact with them (zoom and pan) "
"in a new window."
).format(inline=inline, auto=automatic),
)
backend_layout = QVBoxLayout()
backend_layout.addWidget(backend_label)
backend_layout.addWidget(backend_box)
backend_group.setLayout(backend_layout)
backend_group.setEnabled(self.get_option('pylab'))
matplotlib_box.checkbox.toggled.connect(backend_group.setEnabled)
# Inline backend group
inline_group = QGroupBox(_("Inline backend"))
inline_label = QLabel(_("Settings for figures in the Plots pane"))
inline_label.setWordWrap(True)
formats = (("PNG", 'png'), ("SVG", 'svg'))
format_box = self.create_combobox(
_("Format:") + " ",
formats,
'pylab/inline/figure_format',
default='png',
tip=_(
"PNG is more widely supported, "
"while SVG is resolution-independent and easier to edit "
"but complex plots may not be displayed correctly."
),
)
resolution_spin = self.create_spinbox(
_("Resolution:") + " ",
" " + _("DPI"),
'pylab/inline/resolution',
min_=50,
max_=999,
step=0.1,
tip=_("Only used when the format is PNG. Default is 144."),
)
width_spin = self.create_spinbox(
_("Width:") + " ",
" " + _("inches"),
'pylab/inline/width',
min_=2,
max_=20,
step=1,
tip=_("Default is 6"),
)
height_spin = self.create_spinbox(
_("Height:") + " ",
" " + _("inches"),
'pylab/inline/height',
min_=1,
max_=20,
step=1,
tip=_("Default is 4"),
)
fontsize_spin = self.create_spinbox(
_("Font size:") + " ",
" " + _("points"),
'pylab/inline/fontsize',
min_=5,
max_=48,
step=1.0,
tip=_("Default is 10"),
)
bottom_spin = self.create_spinbox(
_("Bottom edge:") + " ",
" " + _("of figure height"),
'pylab/inline/bottom',
min_=0,
max_=0.3,
step=0.01,
tip=_("The position of the bottom edge of the subplots,\n"
"as a fraction of the figure height (default is 0.11).")
)
bottom_spin.spinbox.setDecimals(2)
bbox_inches_box = newcb(
_("Use a tight layout for inline plots"),
'pylab/inline/bbox_inches',
tip=_("Sets 'bbox_inches' to 'tight' for inline plots.\n"
"When enabled, figures displayed in the Plots pane\n"
"may look different from those output with 'savefig'."))
inline_v_layout = QVBoxLayout()
inline_v_layout.addWidget(inline_label)
inline_layout = QGridLayout()
inline_layout.addWidget(format_box.label, 1, 0)
inline_layout.addWidget(format_box.combobox, 1, 1)
inline_layout.addWidget(format_box.help_label, 1, 3)
spinboxes = [resolution_spin, width_spin, height_spin,
fontsize_spin, bottom_spin]
for counter, spinbox in enumerate(spinboxes):
inline_layout.addWidget(spinbox.plabel, counter + 2, 0)
inline_layout.addWidget(spinbox.spinbox, counter + 2, 1)
inline_layout.addWidget(spinbox.slabel, counter + 2, 2)
inline_layout.addWidget(spinbox.help_label, counter + 2, 3)
inline_layout.addWidget(bbox_inches_box, len(spinboxes) + 2, 0, 1, 4)
inline_h_layout = QHBoxLayout()
inline_h_layout.addLayout(inline_layout)
inline_h_layout.addStretch(1)
inline_v_layout.addLayout(inline_h_layout)
inline_group.setLayout(inline_v_layout)
inline_group.setEnabled(self.get_option('pylab'))
matplotlib_box.checkbox.toggled.connect(inline_group.setEnabled)
# --- Startup ---
# Run lines group
run_lines_group = QGroupBox(_("Run code"))
run_lines_label = QLabel(_(
"Enter a code snippet to run when a new console is started.\n"
"Separate multiple lines by semicolons, for example:<br>"
"<tt>import os; import sys</tt>"
))
run_lines_label.setWordWrap(True)
run_lines_edit = self.create_lineedit(_("Lines:"), 'startup/run_lines',
'', alignment=Qt.Horizontal)
run_lines_layout = QVBoxLayout()
run_lines_layout.addWidget(run_lines_label)
run_lines_layout.addWidget(run_lines_edit)
run_lines_group.setLayout(run_lines_layout)
# Run file group
run_file_group = QGroupBox(_("Run a file"))
run_file_label = QLabel(_(
"Specify a Python file to execute at startup, similar to "
"<tt>PYTHONSTARTUP</tt>"
))
run_file_label.setWordWrap(True)
file_radio = newcb(
_("Execute the following file:"), 'startup/use_run_file', False
)
run_file_browser = self.create_browsefile('', 'startup/run_file', '')
run_file_browser.setEnabled(False)
file_radio.checkbox.toggled.connect(run_file_browser.setEnabled)
run_file_layout = QVBoxLayout()
run_file_layout.addWidget(run_file_label)
run_file_layout.addWidget(file_radio)
run_file_layout.addWidget(run_file_browser)
run_file_group.setLayout(run_file_layout)
# ---- Advanced settings ----
# Autocall group
autocall_group = QGroupBox(_("Autocall"))
autocall_label = QLabel(_(
"Implictly insert parethesis after any callable object, "
"treating anything following it as arguments.<br>"
"For example, typing <tt>print 'Number:', 42</tt> will execute "
"<tt>print('Number:', 42)</tt>."
))
autocall_label.setWordWrap(True)
smart = _('Smart')
full = _('Full')
autocall_opts = ((_('Off'), 0), (smart, 1), (full, 2))
autocall_box = self.create_combobox(
_("Autocall: "),
autocall_opts,
'autocall',
default=0,
tip=_(
"In {smart} mode, Autocall is not applied if there are no "
"arguments after the callable.\n"
"In {full} mode, callable objects are called even if no "
"arguments are present."
).format(smart=smart, full=full),
)
autocall_layout = QVBoxLayout()
autocall_layout.addWidget(autocall_label)
autocall_layout.addWidget(autocall_box)
autocall_group.setLayout(autocall_layout)
# Autoreload group
autoreload_group = QGroupBox(_("Autoreload"))
autoreload_label = QLabel(_(
"Reload imported modules automatically before running code. "
"This is a different mechanism than the User Module Reloader "
"and can be slow on Windows due to limitations of its file system."
))
autoreload_label.setWordWrap(True)
autoreload_box = newcb(
_("Use autoreload"),
"autoreload",
tip=_(
"Enables the autoreload magic. Refer to its documentation to "
"learn how to use it."
)
)
autoreload_layout = QVBoxLayout()
autoreload_layout.addWidget(autoreload_label)
autoreload_layout.addWidget(autoreload_box)
autoreload_group.setLayout(autoreload_layout)
# Prompts group
prompts_group = QGroupBox(_("Prompts"))
prompts_label = QLabel(_(
"Modify how input and output prompts are shown in the console."
))
prompts_label.setWordWrap(True)
in_prompt_edit = self.create_lineedit(
_("Input prompt:"),
'in_prompt',
"",
_('Default is<br>'
'<tt>In [<span class="in-prompt-number">%i</span>]:</tt>'),
alignment=Qt.Horizontal,
)
out_prompt_edit = self.create_lineedit(
_("Output prompt:"),
'out_prompt',
"",
_('Default is<br>'
'<tt>Out[<span class="out-prompt-number">%i</span>]:</tt>'),
alignment=Qt.Horizontal,
)
prompts_g_layout = QGridLayout()
prompts_g_layout.addWidget(in_prompt_edit.label, 0, 0)
prompts_g_layout.addWidget(in_prompt_edit.textbox, 0, 1)
prompts_g_layout.addWidget(in_prompt_edit.help_label, 0, 2)
prompts_g_layout.addWidget(out_prompt_edit.label, 1, 0)
prompts_g_layout.addWidget(out_prompt_edit.textbox, 1, 1)
prompts_g_layout.addWidget(out_prompt_edit.help_label, 1, 2)
prompts_layout = QVBoxLayout()
prompts_layout.addWidget(prompts_label)
prompts_layout.addLayout(prompts_g_layout)
prompts_group.setLayout(prompts_layout)
# Windows adjustments
windows_group = QGroupBox(_("Windows adjustments"))
hide_cmd_windows = newcb(
_("Hide command line output windows "
"generated by the subprocess module"),
'hide_cmd_windows',
)
windows_layout = QVBoxLayout()
windows_layout.addWidget(hide_cmd_windows)
windows_group.setLayout(windows_layout)
# --- Tabs organization ---
self.create_tab(
_("Interface"),
[display_group, confirmations_group, comp_group, output_group]
)
self.create_tab(
_("Plotting"),
[matplotlib_group, backend_group, inline_group]
)
self.create_tab(
_("Startup"),
[run_lines_group, run_file_group]
)
self.create_tab(
_("Advanced"),
[autocall_group, autoreload_group, prompts_group, windows_group]
)
def warn_if_large_buffer(self):
"""Warn the user if the Console buffer size is very large."""
if "buffer_size" not in self.changed_options:
return
msg = None
buffer_size = self.buffer_spin.spinbox.value()
# >10k line buffers can make Spyder slow, see spyder-ide/spyder#19091
if buffer_size > 10_000:
msg = _("Buffer sizes over 10000 lines can slow down Spyder")
elif buffer_size == -1:
msg = _("Unlimited buffer size can slow down Spyder severely")
if msg:
QMessageBox.warning(self, _("Warning"), msg, QMessageBox.Ok)
| IPythonConsoleConfigPage |
python | ray-project__ray | python/ray/train/_internal/state/schema.py | {
"start": 2924,
"end": 3236
} | class ____(BaseModel):
name: str = Field(
description="The key of the dataset dict specified in Ray Train Trainer."
)
dataset_uuid: str = Field(description="The uuid of the dataset.")
dataset_name: Optional[str] = Field(description="The name of the dataset.")
@DeveloperAPI
| TrainDatasetInfo |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.