language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeGuard3.py | {
"start": 258,
"end": 1284
} | class ____(Enum):
NoTypeGuard = 0
TypeGuard = 1
TypeIs = 2
@overload
def is_int(obj: object, mode: Literal[TypeGuardMode.NoTypeGuard]) -> bool: ...
@overload
def is_int(obj: object, mode: Literal[TypeGuardMode.TypeGuard]) -> TypeGuard[int]: ...
@overload
def is_int(obj: object, mode: Literal[TypeGuardMode.TypeIs]) -> TypeIs[int]: ...
def is_int(obj: object, mode: TypeGuardMode) -> bool | TypeGuard[int] | TypeIs[int]: ...
def func_no_typeguard(val: int | str):
if is_int(val, TypeGuardMode.NoTypeGuard):
reveal_type(val, expected_text="int | str")
else:
reveal_type(val, expected_text="int | str")
def func_typeguard(val: int | str):
if is_int(val, TypeGuardMode.TypeGuard):
reveal_type(val, expected_text="int")
else:
reveal_type(val, expected_text="int | str")
def func_typeis(val: int | str):
if is_int(val, TypeGuardMode.TypeIs):
reveal_type(val, expected_text="int")
else:
reveal_type(val, expected_text="str")
| TypeGuardMode |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 761,
"end": 1169
} | class ____:
def test_when_no_len_and_no_length_hint(self):
g = MagicMock(name='group')
g.tasks.__len__.side_effect = TypeError()
g.tasks.__length_hint__ = Mock()
g.tasks.__length_hint__.return_value = 0
assert maybe_unroll_group(g) is g
g.tasks.__length_hint__.side_effect = AttributeError()
assert maybe_unroll_group(g) is g
| test_maybe_unroll_group |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 87716,
"end": 89251
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
description: str = Field(
...,
description=(
"A string description associated with this node type. This field is"
" required."
),
)
instance_type_id: str = Field(
...,
description=(
"An identifier for the type of hardware that this node runs on. This field"
" is required."
),
)
is_deprecated: Optional[bool] = Field(
None,
description=(
"Whether the node type is deprecated. Non-deprecated node types offer"
" greater performance."
),
)
memory_mb: int = Field(
...,
description=(
"Memory (in MB) available for this node type. This field is required."
),
)
node_info: Optional[ClusterCloudProviderNodeInfo] = Field(
None, description="Node type info reported by the cloud provider."
)
node_type_id: str = Field(
..., description="Unique identifier for this node type. This field is required."
)
num_cores: Optional[float] = Field(
None,
description=(
"Number of CPU cores available for this node type. This can be fractional"
" if the number of cores on a machine instance is not divisible by the"
" number of Spark nodes on that machine. This field is required."
),
)
| NodeType |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 90122,
"end": 93305
} | class ____:
Quantizer = _VectorIndexQuantizerUpdate
@staticmethod
def hnsw(
dynamic_ef_factor: Optional[int] = None,
dynamic_ef_min: Optional[int] = None,
dynamic_ef_max: Optional[int] = None,
ef: Optional[int] = None,
flat_search_cutoff: Optional[int] = None,
filter_strategy: Optional[VectorFilterStrategy] = None,
vector_cache_max_objects: Optional[int] = None,
quantizer: Optional[
Union[_PQConfigUpdate, _BQConfigUpdate, _SQConfigUpdate, _RQConfigUpdate]
] = None,
) -> _VectorIndexConfigHNSWUpdate:
"""Create an `_VectorIndexConfigHNSWUpdate` object to update the configuration of the HNSW vector index.
Use this method when defining the `vectorizer_config` argument in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#configure-the-inverted-index) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _VectorIndexConfigHNSWUpdate(
dynamicEfMin=dynamic_ef_min,
dynamicEfMax=dynamic_ef_max,
dynamicEfFactor=dynamic_ef_factor,
ef=ef,
filterStrategy=filter_strategy,
flatSearchCutoff=flat_search_cutoff,
vectorCacheMaxObjects=vector_cache_max_objects,
quantizer=quantizer,
)
@staticmethod
def flat(
vector_cache_max_objects: Optional[int] = None,
quantizer: Optional[Union[_BQConfigUpdate, _RQConfigUpdate]] = None,
) -> _VectorIndexConfigFlatUpdate:
"""Create an `_VectorIndexConfigFlatUpdate` object to update the configuration of the FLAT vector index.
Use this method when defining the `vectorizer_config` argument in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#configure-the-inverted-index) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _VectorIndexConfigFlatUpdate(
vectorCacheMaxObjects=vector_cache_max_objects,
quantizer=quantizer,
)
@staticmethod
def dynamic(
*,
threshold: Optional[int] = None,
hnsw: Optional[_VectorIndexConfigHNSWUpdate] = None,
flat: Optional[_VectorIndexConfigFlatUpdate] = None,
quantizer: Optional[_BQConfigUpdate] = None,
) -> _VectorIndexConfigDynamicUpdate:
"""Create an `_VectorIndexConfigDynamicUpdate` object to update the configuration of the Dynamic vector index.
Use this method when defining the `vectorizer_config` argument in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#configure-the-inverted-index) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _VectorIndexConfigDynamicUpdate(
threshold=threshold,
hnsw=hnsw,
flat=flat,
quantizer=quantizer,
)
| _VectorIndexUpdate |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_dms.py | {
"start": 14008,
"end": 16630
} | class ____:
TASK_DATA = {
"replication_task_id": "task_id",
"source_endpoint_arn": "source_endpoint",
"target_endpoint_arn": "target_endpoint",
"replication_instance_arn": "replication_arn",
"migration_type": "full-load",
"table_mappings": {},
}
def test_init(self):
op = DmsStartTaskOperator(
task_id="start_task",
replication_task_arn=TASK_ARN,
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="us-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.replication_task_arn == TASK_ARN
assert op.start_replication_task_type == "start-replication"
assert op.hook.client_type == "dms"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "us-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = DmsStartTaskOperator(task_id="start_task", replication_task_arn=TASK_ARN)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(DmsHook, "get_task_status", side_effect=("starting",))
@mock.patch.object(DmsHook, "start_replication_task")
@mock.patch.object(DmsHook, "create_replication_task", return_value=TASK_ARN)
@mock.patch.object(DmsHook, "get_conn")
def test_start_task(
self, mock_conn, mock_create_replication_task, mock_start_replication_task, mock_get_task_status
):
dms_hook = DmsHook()
task = dms_hook.create_replication_task(**self.TASK_DATA)
start_task = DmsStartTaskOperator(task_id="start_task", replication_task_arn=task)
start_task.execute(None)
mock_start_replication_task.assert_called_once_with(
replication_task_arn=TASK_ARN,
start_replication_task_type="start-replication",
)
assert dms_hook.get_task_status(TASK_ARN) == "starting"
def test_template_fields(self):
op = DmsStartTaskOperator(
task_id="start_task",
replication_task_arn=TASK_ARN,
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="us-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
validate_template_fields(op)
| TestDmsStartTaskOperator |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 48769,
"end": 55113
} | class ____(NonStrictDataModel):
"""
:param id: ROI id
:type id: str
:param sources: Sources that this ROI belongs to
:type sources: Sequence[str]
:param label: ROI labels
:type label: Sequence[str]
:param label_num: Label number according to the specified labels mapping Used
only when ROI is returned as part of a task's frame.
:type label_num: int
:param poly: ROI polygon (x0, y0, ..., xn, yn)
:type poly: Sequence[float]
:param confidence: ROI confidence
:type confidence: float
:param area: ROI area (not used)
:type area: int
:param meta: Additional metadata dictionary for the roi
:type meta: dict
:param mask: Mask info for this ROI
:type mask: RoiMask
"""
_schema = {
"properties": {
"area": {"description": "ROI area (not used)", "type": ["integer", "null"]},
"confidence": {"description": "ROI confidence", "type": ["number", "null"]},
"id": {"description": "ROI id", "type": ["string", "null"]},
"label": {
"description": "ROI labels",
"items": {"type": "string"},
"type": ["array", "null"],
},
"label_num": {
"description": (
"Label number according to the specified labels mapping Used only when ROI is returned as "
"part of a task's frame."
),
"type": ["integer", "null"],
},
"mask": {
"description": "Mask info for this ROI",
"oneOf": [{"$ref": "#/definitions/roi_mask"}, {"type": "null"}],
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the roi",
"type": ["object", "null"],
},
"poly": {
"description": "ROI polygon (x0, y0, ..., xn, yn)",
"items": {"type": "number"},
"type": ["array", "null"],
},
"sources": {
"description": "Sources that this ROI belongs to",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
id=None,
sources=None,
label=None,
label_num=None,
poly=None,
confidence=None,
area=None,
meta=None,
mask=None,
**kwargs
):
super(Roi, self).__init__(**kwargs)
self.id = id
self.sources = sources
self.label = label
self.label_num = label_num
self.poly = poly
self.confidence = confidence
self.area = area
self.meta = meta
self.mask = mask
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("sources")
def sources(self):
return self._property_sources
@sources.setter
def sources(self, value):
if value is None:
self._property_sources = None
return
self.assert_isinstance(value, "sources", (list, tuple))
self.assert_isinstance(value, "sources", six.string_types, is_array=True)
self._property_sources = value
@schema_property("label")
def label(self):
return self._property_label
@label.setter
def label(self, value):
if value is None:
self._property_label = None
return
self.assert_isinstance(value, "label", (list, tuple))
self.assert_isinstance(value, "label", six.string_types, is_array=True)
self._property_label = value
@schema_property("label_num")
def label_num(self):
return self._property_label_num
@label_num.setter
def label_num(self, value):
if value is None:
self._property_label_num = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "label_num", six.integer_types)
self._property_label_num = value
@schema_property("poly")
def poly(self):
return self._property_poly
@poly.setter
def poly(self, value):
if value is None:
self._property_poly = None
return
self.assert_isinstance(value, "poly", (list, tuple))
self.assert_isinstance(
value, "poly", six.integer_types + (float,), is_array=True
)
self._property_poly = value
@schema_property("confidence")
def confidence(self):
return self._property_confidence
@confidence.setter
def confidence(self, value):
if value is None:
self._property_confidence = None
return
self.assert_isinstance(value, "confidence", six.integer_types + (float,))
self._property_confidence = value
@schema_property("area")
def area(self):
return self._property_area
@area.setter
def area(self, value):
if value is None:
self._property_area = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "area", six.integer_types)
self._property_area = value
@schema_property("meta")
def meta(self):
return self._property_meta
@meta.setter
def meta(self, value):
if value is None:
self._property_meta = None
return
self.assert_isinstance(value, "meta", (dict,))
self._property_meta = value
@schema_property("mask")
def mask(self):
return self._property_mask
@mask.setter
def mask(self, value):
if value is None:
self._property_mask = None
return
if isinstance(value, dict):
value = RoiMask.from_dict(value)
else:
self.assert_isinstance(value, "mask", RoiMask)
self._property_mask = value
| Roi |
python | huggingface__transformers | src/transformers/models/gpt_neox/modeling_gpt_neox.py | {
"start": 13386,
"end": 14113
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
GPTNeoXRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| GPTNeoXRMSNorm |
python | ApeWorX__ape | src/ape/utils/abi.py | {
"start": 2333,
"end": 10577
} | class ____:
"""
A utility class responsible for parsing structs out of values.
"""
def __init__(self, method_abi: Union[ConstructorABI, MethodABI, EventABI]):
self.abi = method_abi
@property
def default_name(self) -> str:
"""
The default struct return name for unnamed structs.
This value is also used for named tuples where the tuple does not have a name
(but each item in the tuple does have a name).
"""
name = self.abi.name if isinstance(self.abi, MethodABI) else "constructor"
return f"{name}_return"
def encode_input(self, values: Union[list, tuple, dict]) -> Any:
"""
Convert dicts and other objects to struct inputs.
Args:
values (Union[list, tuple]): A list of input values.
Returns:
Any: The same input values only decoded into structs when applicable.
"""
return [self._encode(ipt, v) for ipt, v in zip(self.abi.inputs, values)]
def decode_input(self, values: Union[Sequence, dict[str, Any]]) -> Any:
return (
self._decode(self.abi.inputs, values)
if isinstance(self.abi, (EventABI, MethodABI))
else None
)
def _encode(self, _type: ABIType, value: Any):
if (
_type.type == "tuple"
and _type.components
and all(m.name for m in _type.components)
and not isinstance(value, tuple)
):
if isinstance(value, dict):
return tuple(
(
self._encode(m, value[m.name])
if isinstance(value[m.name], dict)
else value[m.name]
)
for m in _type.components
)
elif isinstance(value, (list, tuple)):
# NOTE: Args must be passed in correct order.
return tuple(value)
else:
arg = [getattr(value, m.name) for m in _type.components if m.name]
return tuple(arg)
elif (
str(_type.type).startswith("tuple[")
and isinstance(value, (list, tuple))
and len(_type.components or []) > 0
):
non_array_type_data = _type.model_dump()
non_array_type_data["type"] = "tuple"
non_array_type = ABIType(**non_array_type_data)
return [self._encode(non_array_type, v) for v in value]
return value
def decode_output(self, values: Union[list, tuple]) -> Any:
"""
Parse a list of output types and values into structs.
Values are only altered when they are a struct.
This method also handles structs within structs as well as arrays of structs.
Args:
values (Union[list, tuple]): A list of output values.
Returns:
Any: The same input values only decoded into structs when applicable.
"""
return self._decode(self.abi.outputs, values) if isinstance(self.abi, MethodABI) else None
def _decode(
self,
_types: Union[Sequence[ABIType]],
values: Union[Sequence, dict[str, Any]],
):
if is_struct(_types):
return self._create_struct(_types[0], values)
elif isinstance(values, (list, tuple)) and is_named_tuple(_types, values):
# Handle tuples. NOTE: unnamed output structs appear as tuples with named members
return create_struct(self.default_name, _types, values)
return_values: list = []
has_array_return = _is_array_return(_types)
has_array_of_tuples_return = (
has_array_return and len(_types) == 1 and "tuple" in _types[0].type
)
if has_array_return and not has_array_of_tuples_return:
# Normal array
return values
elif has_array_of_tuples_return:
item_type_str = str(_types[0].type).partition("[")[0]
data = {
**_types[0].model_dump(),
"type": item_type_str,
"internalType": item_type_str,
}
output_type = ABIType.model_validate(data)
if isinstance(values, (list, tuple)) and not values[0]:
# Only returned an empty list.
return_values.append([])
elif isinstance(values, (list, tuple)):
for value in values[0]:
item = self._decode([output_type], [value])
return_values.append(item)
else:
for output_type, value in zip(_types, values):
if isinstance(value, (tuple, list)):
item_type_str = str(output_type.type).partition("[")[0]
if item_type_str == "tuple":
# Either an array of structs or nested structs.
item_type_data = {
**output_type.model_dump(),
"type": item_type_str,
"internalType": item_type_str,
}
item_type = ABIType.model_validate(item_type_data)
if is_struct(output_type):
parsed_item = self._decode([item_type], [value])
else:
# Is array of structs.
parsed_item = [self._decode([item_type], [v]) for v in value]
# If it's an empty dynamic array of structs, replace `None` with empty list
output_raw_type = output_type.type
if (
isinstance(output_raw_type, str)
and output_raw_type.endswith("[]")
and parsed_item is None
):
parsed_item = []
else:
parsed_item = [HexBytes(v) if isinstance(v, bytes) else v for v in value]
return_values.append(parsed_item)
else:
return_values.append(value)
return return_values
def _create_struct(self, out_abi: ABIType, out_value: Any) -> Optional[Any]:
if not out_abi.components or not out_value[0]:
# Likely an empty tuple or not a struct.
return None
internal_type = out_abi.internal_type
if out_abi.name == "" and internal_type and "struct " in internal_type:
name = internal_type.replace("struct ", "").split(".")[-1]
else:
name = out_abi.name or self.default_name
components = self._parse_components(out_abi.components, out_value[0])
return create_struct(
name,
out_abi.components,
components,
)
def _parse_components(self, components: list[ABIType], values) -> list:
parsed_values = []
for component, value in zip(components, values):
if is_struct(component):
new_value = self._create_struct(component, (value,))
parsed_values.append(new_value)
elif is_array(component.type) and "tuple" in component.type and component.components:
new_value = [self._decode(component.components, v) for v in value]
parsed_values.append(new_value)
else:
parsed_values.append(value)
return parsed_values
def is_struct(outputs: Union[ABIType, Sequence[ABIType]]) -> bool:
"""
Returns ``True`` if the given output is a struct.
"""
outputs_seq = outputs if isinstance(outputs, (tuple, list)) else [outputs]
return (
len(outputs_seq) == 1
and "[" not in outputs_seq[0].type
and outputs_seq[0].components not in (None, [])
and all(c.name != "" for c in outputs_seq[0].components or [])
)
def is_named_tuple(outputs: Sequence[ABIType], output_values: Sequence) -> bool:
"""
Returns ``True`` if the given output is a tuple where every item is named.
"""
return all(o.name for o in outputs) and len(output_values) > 1
| StructParser |
python | jazzband__django-simple-history | simple_history/tests/tests/test_middleware.py | {
"start": 524,
"end": 6966
} | class ____(TestCase):
def setUp(self):
self.user = CustomUser.objects.create_superuser(
"user_login", "u@example.com", "pass"
)
def test_user_is_set_on_create_view_when_logged_in(self):
self.client.force_login(self.user)
data = {"question": "Test question", "pub_date": "2010-01-01"}
self.client.post(reverse("poll-add"), data=data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll_history = polls.first().history.all()
self.assertListEqual(
[ph.history_user_id for ph in poll_history], [self.user.id]
)
def test_user_is_not_set_on_create_view_not_logged_in(self):
data = {"question": "Test question", "pub_date": "2010-01-01"}
self.client.post(reverse("poll-add"), data=data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll_history = polls.first().history.all()
self.assertListEqual([ph.history_user_id for ph in poll_history], [None])
def test_user_is_set_on_update_view_when_logged_in(self):
self.client.force_login(self.user)
poll = Poll.objects.create(question="Test question", pub_date=date.today())
data = {"question": "Test question updated", "pub_date": "2010-01-01"}
self.client.post(reverse("poll-update", args=[poll.pk]), data=data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll = polls.first()
self.assertEqual(poll.question, "Test question updated")
poll_history = poll.history.all()
self.assertListEqual(
[ph.history_user_id for ph in poll_history], [self.user.id, None]
)
def test_user_is_not_set_on_update_view_when_not_logged_in(self):
poll = Poll.objects.create(question="Test question", pub_date=date.today())
data = {"question": "Test question updated", "pub_date": "2010-01-01"}
self.client.post(reverse("poll-update", args=[poll.pk]), data=data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll = polls.first()
self.assertEqual(poll.question, "Test question updated")
poll_history = poll.history.all()
self.assertListEqual([ph.history_user_id for ph in poll_history], [None, None])
def test_user_is_unset_on_update_view_after_logging_out(self):
self.client.force_login(self.user)
poll = Poll.objects.create(question="Test question", pub_date=date.today())
data = {"question": "Test question updated", "pub_date": "2010-01-01"}
self.client.post(reverse("poll-update", args=[poll.pk]), data=data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll = polls.first()
self.assertEqual(poll.question, "Test question updated")
self.client.logout()
new_data = {
"question": "Test question updated part 2",
"pub_date": "2010-01-01",
}
self.client.post(reverse("poll-update", args=[poll.pk]), data=new_data)
polls = Poll.objects.all()
self.assertEqual(polls.count(), 1)
poll = polls.first()
self.assertEqual(poll.question, "Test question updated part 2")
poll_history = poll.history.all()
self.assertListEqual(
[ph.history_user_id for ph in poll_history], [None, self.user.id, None]
)
def test_user_is_set_on_delete_view_when_logged_in(self):
self.client.force_login(self.user)
poll = Poll.objects.create(question="Test question", pub_date=date.today())
self.client.post(reverse("poll-delete", args=[poll.pk]))
polls = Poll.objects.all()
self.assertEqual(polls.count(), 0)
poll_history = poll.history.all()
self.assertListEqual(
[ph.history_user_id for ph in poll_history], [self.user.id, None]
)
def test_user_is_not_set_on_delete_view_when_not_logged_in(self):
poll = Poll.objects.create(question="Test question", pub_date=date.today())
self.client.post(reverse("poll-delete", args=[poll.pk]))
polls = Poll.objects.all()
self.assertEqual(polls.count(), 0)
poll_history = poll.history.all()
self.assertListEqual([ph.history_user_id for ph in poll_history], [None, None])
def test_bucket_member_is_set_on_create_view_when_logged_in(self):
self.client.force_login(self.user)
member1 = BucketMember.objects.create(name="member1", user=self.user)
data = {"data": "Test Data"}
self.client.post(reverse("bucket_data-add"), data=data)
bucket_datas = BucketDataRegisterRequestUser.objects.all()
self.assertEqual(bucket_datas.count(), 1)
history = bucket_datas.first().history.all()
self.assertListEqual([h.history_user_id for h in history], [member1.id])
# The `request` attribute of `HistoricalRecords.context` should be deleted
# even if this setting is set to `True`
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
@mock.patch("simple_history.tests.view.MockableView.get")
def test_request_attr_is_deleted_after_each_response(self, func_mock):
"""https://github.com/django-commons/django-simple-history/issues/1189"""
def assert_has_request_attr(has_attr: bool):
self.assertEqual(hasattr(HistoricalRecords.context, "request"), has_attr)
def mocked_get(*args, **kwargs):
assert_has_request_attr(True)
response_ = HttpResponse(status=200)
response_.historical_records_request = HistoricalRecords.context.request
return response_
func_mock.side_effect = mocked_get
self.client.force_login(self.user)
mockable_url = reverse("mockable")
assert_has_request_attr(False)
response = self.client.get(mockable_url)
assert_has_request_attr(False)
# Check that the `request` attr existed while handling the request
self.assertEqual(response.historical_records_request.user, self.user)
func_mock.side_effect = RuntimeError()
with self.assertRaises(RuntimeError):
self.client.get(mockable_url)
# The request variable should be deleted even if an exception was raised
assert_has_request_attr(False)
@override_settings(**middleware_override_settings)
| MiddlewareTest |
python | Textualize__textual | docs/examples/guide/widgets/tooltip02.py | {
"start": 214,
"end": 643
} | class ____(App):
CSS = """
Screen {
align: center middle;
}
Tooltip {
padding: 2 4;
background: $primary;
color: auto 90%;
}
"""
def compose(self) -> ComposeResult:
yield Button("Click me", variant="success")
def on_mount(self) -> None:
self.query_one(Button).tooltip = TEXT
if __name__ == "__main__":
app = TooltipApp()
app.run()
| TooltipApp |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/router.py | {
"start": 1386,
"end": 4359
} | class ____:
def __init__(self, workflow_factory: Callable[[], Awaitable[Workflow]]):
self.workflow_factory = workflow_factory
self.router = APIRouter()
self.router.add_api_route("/run", self.run, methods=["POST"])
async def run(self, input: RunAgentInput):
workflow = await self.workflow_factory()
handler = workflow.run(
input_data=input,
)
async def stream_response():
try:
yield workflow_event_to_sse(
RunStartedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
async for ev in handler.stream_events():
if isinstance(ev, AG_UI_EVENTS):
yield workflow_event_to_sse(ev)
# Finish the run
_ = await handler
yield workflow_event_to_sse(
RunFinishedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
except Exception as e:
yield workflow_event_to_sse(
RunErrorWorkflowEvent(
timestamp=timestamp(),
message=str(e),
code=str(type(e)),
)
)
await handler.cancel_run()
raise
return StreamingResponse(stream_response(), media_type="text/event-stream")
def get_default_workflow_factory(
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> Callable[[], Workflow]:
async def workflow_factory():
return AGUIChatWorkflow(
llm=llm,
frontend_tools=frontend_tools,
backend_tools=backend_tools,
initial_state=initial_state,
system_prompt=system_prompt,
timeout=timeout,
)
return workflow_factory
def get_ag_ui_workflow_router(
workflow_factory: Optional[Callable[[], Awaitable[Workflow]]] = None,
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> APIRouter:
workflow_factory = workflow_factory or get_default_workflow_factory(
llm, frontend_tools, backend_tools, initial_state, system_prompt, timeout
)
return AGUIWorkflowRouter(workflow_factory).router
| AGUIWorkflowRouter |
python | pypa__setuptools | setuptools/_vendor/typeguard/_exceptions.py | {
"start": 218,
"end": 411
} | class ____(UserWarning):
"""Emitted by typeguard's type checkers when a type mismatch is detected."""
def __init__(self, message: str):
super().__init__(message)
| TypeCheckWarning |
python | python-poetry__poetry | src/poetry/repositories/link_sources/html.py | {
"start": 2409,
"end": 3424
} | class ____:
"""
This class represents the parsed content of a "simple" repository's root page. This follows the
specification laid out in PEP 503.
See: https://peps.python.org/pep-0503/
"""
def __init__(self, content: str | None = None) -> None:
parser = HTMLPageParser()
parser.feed(content or "")
self._parsed = parser.anchors
def search(self, query: str | list[str]) -> list[str]:
results: list[str] = []
tokens = query if isinstance(query, list) else [query]
for anchor in self._parsed:
href = anchor.get("href")
if href and any(token in href for token in tokens):
results.append(href.rstrip("/"))
return results
@cached_property
def package_names(self) -> list[str]:
results: list[str] = []
for anchor in self._parsed:
if href := anchor.get("href"):
results.append(href.rstrip("/"))
return results
| SimpleRepositoryRootPage |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/batch/boto_schema.py | {
"start": 1279,
"end": 2119
} | class ____(Schema):
"""API Response for Describe Jobs."""
# The unique identifier for the job.
job_id = fields.String(data_key="jobId", required=True)
# The current status for the job:
# 'SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING', 'SUCCEEDED', 'FAILED'
status = fields.String(required=True)
# A short, human-readable string to provide additional details about the current status of the job.
status_reason = fields.String(data_key="statusReason")
@post_load
def make_job(self, data, **kwargs):
"""Overwrite marshmallow load() to return an instance of BatchJob instead of a dictionary."""
return BatchJob(**data)
class Meta:
"""Options object for a Schema. See Schema.Meta for more details and valid values."""
unknown = EXCLUDE
| BatchJobDetailSchema |
python | langchain-ai__langchain | libs/core/tests/unit_tests/runnables/test_history.py | {
"start": 26528,
"end": 30217
} | class ____(RunnableLambda[Input, Output]):
def with_listeners(
self,
*,
on_start: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_end: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_error: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
) -> Runnable[Input, Output]:
def create_tracer(config: RunnableConfig) -> RunnableConfig:
tracer = RootListenersTracer(
config=config,
on_start=on_start,
on_end=on_end,
on_error=on_error,
)
tracer.raise_error = True
return {
"callbacks": [tracer],
}
return RunnableBinding(
bound=self,
config_factories=[create_tracer],
)
def with_alisteners(
self,
*,
on_start: AsyncListener | None = None,
on_end: AsyncListener | None = None,
on_error: AsyncListener | None = None,
) -> Runnable[Input, Output]:
def create_tracer(config: RunnableConfig) -> RunnableConfig:
tracer = AsyncRootListenersTracer(
config=config,
on_start=on_start,
on_end=on_end,
on_error=on_error,
)
tracer.raise_error = True
return {
"callbacks": [tracer],
}
return RunnableBinding(
bound=self,
config_factories=[create_tracer],
)
def test_get_output_messages_no_value_error() -> None:
runnable = _RunnableLambdaWithRaiseError(
lambda messages: "you said: "
+ "\n".join(str(m.content) for m in messages if isinstance(m, HumanMessage))
)
store: dict = {}
get_session_history = _get_get_session_history(store=store)
with_history = RunnableWithMessageHistory(runnable, get_session_history)
config: RunnableConfig = {
"configurable": {"session_id": "1", "message_history": get_session_history("1")}
}
may_catch_value_error = None
try:
with_history.bound.invoke([HumanMessage(content="hello")], config)
except ValueError as e:
may_catch_value_error = e
assert may_catch_value_error is None
def test_get_output_messages_with_value_error() -> None:
illegal_bool_message = False
runnable = _RunnableLambdaWithRaiseError(lambda _: illegal_bool_message)
store: dict = {}
get_session_history = _get_get_session_history(store=store)
with_history = RunnableWithMessageHistory(runnable, get_session_history) # type: ignore[arg-type]
config: RunnableConfig = {
"configurable": {"session_id": "1", "message_history": get_session_history("1")}
}
with pytest.raises(
ValueError,
match=re.escape(
"Expected str, BaseMessage, list[BaseMessage], or tuple[BaseMessage]."
f" Got {illegal_bool_message}."
),
):
with_history.bound.invoke([HumanMessage(content="hello")], config)
illegal_int_message = 123
runnable2 = _RunnableLambdaWithRaiseError(lambda _: illegal_int_message)
with_history = RunnableWithMessageHistory(runnable2, get_session_history) # type: ignore[arg-type]
with pytest.raises(
ValueError,
match=re.escape(
"Expected str, BaseMessage, list[BaseMessage], or tuple[BaseMessage]."
f" Got {illegal_int_message}."
),
):
with_history.bound.invoke([HumanMessage(content="hello")], config)
| _RunnableLambdaWithRaiseError |
python | getsentry__sentry | src/sentry/analytics/events/metric_alert_with_ui_component_created.py | {
"start": 95,
"end": 298
} | class ____(analytics.Event):
user_id: int | None = None
alert_rule_id: int
organization_id: int
analytics.register(MetricAlertWithUiComponentCreatedEvent)
| MetricAlertWithUiComponentCreatedEvent |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qinstancenorm_test.py | {
"start": 289,
"end": 1337
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype
),
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0,
}
def forward(self, qX, weight, bias, eps: float, Y_scale: float, Y_zero_point: int):
return torch.ops.quantized.instance_norm(
qX,
weight=weight,
bias=bias,
eps=eps,
output_scale=Y_scale,
output_zero_point=Y_zero_point,
)
op_bench.generate_pt_test(instancenorm_configs_short, QInstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QInstanceNormBenchmark |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 114011,
"end": 114328
} | class ____(sgqlc.types.Enum):
"""Properties by which sponsorship update connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order sponsorship newsletters by when they were
created.
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| SponsorshipNewsletterOrderField |
python | mahmoud__glom | glom/tutorial.py | {
"start": 15892,
"end": 16340
} | class ____:
"""This type implements an oversimplified storage manager, wrapping an
OrderedDict instead of a database. Those familiar with Django and
SQLAlchemy will recognize the pattern being sketched here.
"""
def all(self):
return list(CONTACTS.values())
def save(self, contact):
CONTACTS[contact.id] = contact
def get(self, contact_id):
return CONTACTS.get(contact_id)
@attr.s
| ContactManager |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_test.py | {
"start": 4570,
"end": 5675
} | class ____(extension_type.BatchableExtensionType):
"""Example subclass of ExtensionType, used for testing.
This version adds Keras required properties to MaskedTensor and its Spec
class, to test Keras integration.
"""
__name__ = 'tf.test.MaskedTensorV3.Spec'
values: typing.Union[tensor.Tensor, ragged_tensor.RaggedTensor]
mask: typing.Union[tensor.Tensor, ragged_tensor.RaggedTensor]
def __init__(self, values, mask):
if isinstance(values, ragged_tensor.RaggedTensor):
assert isinstance(mask, ragged_tensor.RaggedTensor)
assert mask.dtype == dtypes.bool
else:
values = ops.convert_to_tensor(values)
mask = ops.convert_to_tensor(mask, dtypes.bool)
self.values = values
self.mask = mask
# Required by assert_input_compatibility in keras/engine/input_spec.py
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
class Spec:
# Required by KerasTensor.shape in keras/engine/keras_tensor.py
@property
def _shape(self):
return self.values._shape
| MaskedTensorV3 |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 13635,
"end": 19856
} | class ____(DisplayedTransform):
'Resizes the biggest dimension of an image to `max_sz` maintaining the aspect ratio'
order = 1
def __init__(self,
max_sz: int, # Biggest dimension of the resized image
resamples=(BILINEAR, NEAREST), # Pillow `Image` resample mode, resamples[1] for mask
**kwargs
):
store_attr()
super().__init__(**kwargs)
def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
w,h = _get_sz(x)
if w >= h: nw,nh = self.max_sz,h*self.max_sz/w
else: nw,nh = w*self.max_sz/h,self.max_sz
return Resize(size=(int(nh),int(nw)), resamples=self.resamples)(x)
# %% ../../nbs/09_vision.augment.ipynb 77
def _init_mat(x):
mat = torch.eye(3, device=x.device).float()
return mat.unsqueeze(0).expand(x.size(0), 3, 3).contiguous()
# %% ../../nbs/09_vision.augment.ipynb 80
def _grid_sample(x, coords, mode='bilinear', padding_mode='reflection', align_corners=None):
"Resample pixels in `coords` from `x` by `mode`, with `padding_mode` in ('reflection','border','zeros')."
#coords = coords.permute(0, 3, 1, 2).contiguous().permute(0, 2, 3, 1) # optimize layout for grid_sample
if mode=='bilinear': # hack to get smoother downwards resampling
mn,mx = coords.min(),coords.max()
# max amount we're affine zooming by (>1 means zooming in)
z = 1/(mx-mn).item()*2
# amount we're resizing by, with 100% extra margin
d = min(x.shape[-2]/coords.shape[-2], x.shape[-1]/coords.shape[-1])/2
# If we're resizing up by >200%, and we're zooming less than that, interpolate first
if d>1 and d>z:
x = F.interpolate(x, scale_factor=1/d, mode='area', recompute_scale_factor=True)
return F.grid_sample(x, coords, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 90
def affine_grid(
theta:Tensor, # Batch of affine transformation matrices
size:tuple, # Output size
align_corners:bool=None # PyTorch `F.grid_sample` align_corners
):
" Generates `TensorFlowField` from a transformation affine matrices `theta`"
return TensorFlowField(F.affine_grid(theta, size, align_corners=align_corners))
# %% ../../nbs/09_vision.augment.ipynb 91
@patch
def affine_coord(x: TensorImage,
mat:Tensor=None, # Batch of affine transformation matrices
coord_tfm:Callable=None, # Partial function of composable coordinate transforms
sz:int|tuple=None, # Output size, duplicated if one value is specified
mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `TensorImage`
pad_mode=PadMode.Reflection, # Padding applied to `TensorImage`
align_corners=True # PyTorch `F.grid_sample` align_corners
):
"Apply affine and coordinate transforms to `TensorImage`"
if mat is None and coord_tfm is None and sz is None: return x
size = tuple(x.shape[-2:]) if sz is None else (sz,sz) if isinstance(sz,int) else tuple(sz)
if mat is None: mat = _init_mat(x)[:,:2]
coords = affine_grid(mat, x.shape[:2] + size, align_corners=align_corners)
if coord_tfm is not None: coords = coord_tfm(coords)
return TensorImage(_grid_sample(x, coords, mode=mode, padding_mode=pad_mode, align_corners=align_corners))
@patch
def affine_coord(x: TensorMask,
mat:Tensor=None, # Batch of affine transformation matrices
coord_tfm:Callable=None, # Partial function of composable coordinate transforms
sz:int|tuple=None, # Output size, duplicated if one value is specified
mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorMask`
pad_mode=PadMode.Reflection, # Padding applied to `TensorMask`
align_corners=True # PyTorch `F.grid_sample` align_corners
):
"Apply affine and coordinate transforms to `TensorMask`"
add_dim = (x.ndim==3)
if add_dim: x = x[:,None]
res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode, align_corners).long()
if add_dim: res = res[:,0]
return TensorMask(res)
@patch
def affine_coord(x: TensorPoint,
mat:Tensor=None, # Batch of affine transformation matrices
coord_tfm=None, # Partial function of composable coordinate transforms
sz=None, # Output size, duplicated if one value is specified
mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorPoint`
pad_mode=PadMode.Zeros, # Padding applied to `TensorPoint`
align_corners=True # PyTorch `F.grid_sample` align_corners
):
"Apply affine and coordinate transforms to `TensorPoint`"
#assert pad_mode==PadMode.Zeros, "Only zero padding is supported for `TensorPoint` and `TensorBBox`"
if sz is None: sz = getattr(x, "img_size", None)
if coord_tfm is not None: x = coord_tfm(x, invert=True)
if mat is not None:
mat = TensorPoint(mat)
x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))
return TensorPoint(x, sz=sz)
@patch
def affine_coord(x: TensorBBox,
mat=None, # Batch of affine transformation matrices
coord_tfm=None, # Partial function of composable coordinate transforms
sz=None, # Output size, duplicated if one value is specified
mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorBBox`
pad_mode=PadMode.Zeros, # Padding applied to `TensorBBox`
align_corners=True # PyTorch `F.grid_sample` align_corners
):
"Apply affine and coordinate transforms to `TensorBBox`"
if mat is None and coord_tfm is None: return x
if sz is None: sz = getattr(x, "img_size", None)
bs,n = x.shape[:2]
pnts = stack([x[...,:2], stack([x[...,0],x[...,3]],dim=2),
stack([x[...,2],x[...,1]],dim=2), x[...,2:]], dim=2)
pnts = TensorPoint(pnts.view(bs, 4*n, 2), img_size=sz).affine_coord(mat, coord_tfm, sz, mode, pad_mode)
pnts = pnts.view(bs, n, 4, 2)
tl,dr = pnts.min(dim=2)[0],pnts.max(dim=2)[0]
return TensorBBox(torch.cat([tl, dr], dim=2), img_size=sz)
# %% ../../nbs/09_vision.augment.ipynb 92
def _prepare_mat(x, mat):
h,w = getattr(x, 'img_size', x.shape[-2:])
mat[:,0,1] *= h/w
mat[:,1,0] *= w/h
return mat[:,:2]
# %% ../../nbs/09_vision.augment.ipynb 93
| RatioResize |
python | openai__openai-python | src/openai/types/conversations/conversation_item.py | {
"start": 2197,
"end": 2742
} | class ____(BaseModel):
command: List[str]
"""The command to run."""
env: Dict[str, str]
"""Environment variables to set for the command."""
type: Literal["exec"]
"""The type of the local shell action. Always `exec`."""
timeout_ms: Optional[int] = None
"""Optional timeout in milliseconds for the command."""
user: Optional[str] = None
"""Optional user to run the command as."""
working_directory: Optional[str] = None
"""Optional working directory to run the command in."""
| LocalShellCallAction |
python | google__jax | jax/_src/debugging.py | {
"start": 2005,
"end": 17234
} | class ____(effects.Effect):
__str__ = lambda self: "OrderedDebug"
ordered_debug_effect = OrderedDebugEffect()
effects.ordered_effects.add_type(OrderedDebugEffect)
effects.lowerable_effects.add_type(DebugEffect)
effects.lowerable_effects.add_type(OrderedDebugEffect)
effects.control_flow_allowed_effects.add_type(DebugEffect)
effects.control_flow_allowed_effects.add_type(OrderedDebugEffect)
effects.remat_allowed_effects.add_type(DebugEffect)
effects.remat_allowed_effects.add_type(OrderedDebugEffect)
effects.custom_derivatives_allowed_effects.add_type(DebugEffect)
effects.custom_derivatives_allowed_effects.add_type(OrderedDebugEffect)
effects.partial_eval_kept_effects.add_type(DebugEffect)
effects.partial_eval_kept_effects.add_type(OrderedDebugEffect)
# `debug_callback_p` is the main primitive for staging out Python callbacks.
debug_callback_p = core.Primitive('debug_callback')
debug_callback_p.multiple_results = True
map, unsafe_map = util.safe_map, map
@debug_callback_p.def_impl
def debug_callback_impl(*args, callback: Callable[..., Any],
effect: DebugEffect, partitioned: bool):
del effect, partitioned
try:
cpu_device, *_ = xla_bridge.local_devices(backend="cpu")
except RuntimeError as e:
raise RuntimeError(
"jax.debug.callback failed to find a local CPU device to place the"
" inputs on. Make sure \"cpu\" is listed in --jax_platforms or the"
" JAX_PLATFORMS environment variable."
) from e
args = api.device_put(args, cpu_device)
with (config.default_device(cpu_device),
sharding_impls._internal_use_concrete_mesh(mesh_lib.empty_concrete_mesh),
mesh_lib.use_abstract_mesh(mesh_lib.empty_abstract_mesh)):
try:
callback(*args)
except BaseException:
logger.exception("jax.debug.callback failed")
raise
return ()
@debug_callback_p.def_effectful_abstract_eval
def debug_callback_abstract_eval(*flat_avals, callback: Callable[..., Any],
effect: DebugEffect, partitioned: bool):
del flat_avals, callback, partitioned
return [], {effect}
def debug_batching_rule(args, dims, *, primitive, **params):
"""Unrolls the debug callback across the mapped axis."""
axis_size = next(x.shape[i] for x, i in zip(args, dims)
if i is not None)
# TODO(sharadmv): implement in terms of rolled loop unstead of unrolled.
def get_arg_at_dim(i, dim, arg):
if dim is batching.not_mapped:
# Broadcast unmapped argument
return arg
return lax.index_in_dim(arg, i, axis=dim, keepdims=False)
outs = []
for i in range(axis_size):
args_idx = map(partial(get_arg_at_dim, i), dims, args)
outs.append(primitive.bind(*args_idx, **params))
outs = [jnp.stack(xs) for xs in zip(*outs)]
return outs, (0,) * len(outs)
batching.primitive_batchers[debug_callback_p] = partial(
debug_batching_rule, primitive=debug_callback_p
)
def debug_callback_jvp_rule(primals, tangents, **params):
return debug_callback_p.bind(*primals, **params), []
ad.primitive_jvps[debug_callback_p] = debug_callback_jvp_rule
def debug_callback_transpose_rule(_, *flat_args, callback: Callable[..., Any],
effect: DebugEffect, partitioned):
del callback, effect, partitioned
return [None for _ in flat_args]
ad.primitive_transposes[debug_callback_p] = debug_callback_transpose_rule
def _debug_callback_partial_auto(axis_context, *args, **params):
partial_auto = list(set(axis_context.mesh.axis_names) - axis_context.manual_axes)
def f():
idx = lax.axis_index(*partial_auto)
return lax.cond(idx == 0,
lambda: debug_callback_p.bind(*args, **params),
lambda: [])
return shard_map.shard_map(f, in_specs=(), out_specs=[])()
def debug_callback_lowering(ctx, *args, effect, partitioned, callback, **params):
axis_context = ctx.module_context.axis_context
if isinstance(axis_context, sharding_impls.SPMDAxisContext):
# We're a shard_map, which might be partial-manual or full-manual.
partial_auto = set(axis_context.mesh.axis_names) - axis_context.manual_axes
if partial_auto:
# If we have partial manual / partial auto sharding, we gather and
# conditionally run the callback.
lower = partial(
_debug_callback_partial_auto,
axis_context,
effect=effect,
partitioned=partitioned,
callback=callback,
**params,
)
return mlir.lower_fun(lower)(ctx, *args)
elif set(axis_context.manual_axes) == set(axis_context.mesh.axis_names):
# If we have fully manual sharding during lowering, that means the JAX
# program has per-device semantics, so we run the callback on each device.
if config.use_shardy_partitioner.value:
sharding = cb._get_sdy_array_list_for_callbacks(ctx.avals_out)
else:
sharding = xc.OpSharding()
sharding.type = xc.OpSharding.Type.MANUAL
else:
assert False # Unreachable
elif isinstance(axis_context, sharding_impls.ShardingContext):
# If we have fully automatic sharding during lowering, that means the JAX
# program has bulk array semantics, so we run the callback with a MAXIMAL
# sharding and hence execute it only once on the full logical value).
if config.use_shardy_partitioner.value:
sharding = sharding_impls.SdyArrayList([
sharding_impls.SdyArray(
mesh_shape=(), dim_shardings=[], logical_device_ids=(0,))])
else:
sharding = xc.OpSharding()
sharding.type = xc.OpSharding.Type.MAXIMAL
sharding.tile_assignment_dimensions = [1]
sharding.tile_assignment_devices = [0]
else:
# When there's no SPMD partitioning going on, don't annotate a sharding.
sharding = None
def _callback(*flat_args):
debug_callback_p.impl(
*flat_args,
effect=effect,
partitioned=partitioned,
callback=callback,
**params,
)
return ()
if effects.ordered_effects.contains(effect):
token = ctx.tokens_in.get(effect)
result, token, _ = cb.emit_python_callback(
ctx, _callback, token, list(args), ctx.avals_in, ctx.avals_out,
has_side_effect=True, returns_token=True, partitioned=partitioned)
ctx.set_tokens_out(mlir.TokenSet({effect: token}))
else:
result, _, _ = cb.emit_python_callback(
ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out,
has_side_effect=True, returns_token=True, partitioned=partitioned,
sharding=sharding)
return result
mlir.register_lowering(debug_callback_p, debug_callback_lowering,
platform="cpu")
mlir.register_lowering(
debug_callback_p, debug_callback_lowering, platform="gpu")
# Debug callbacks use channel IDs on TPU, which require non-caching.
mlir.register_lowering(
debug_callback_p, debug_callback_lowering, platform="tpu",
cacheable=False)
def _debug_partial_eval_custom(saveable, unks_in, inst_in, eqn, primitive):
# The default behavior for effectful primitives is to not stage them if
# possible. For debug callback, we actually want it to be staged to
# provide more information to the user. This rule bypasses partial_eval's
# regular behavior to do that. Specifically, we will stage the callback
# if:
# 1) the policy says debug_callbacks are not saveable
# 2) the policy says debug_callbacks are saveable BUT all of the input
# values are instantiated.
# The purpose is to call back with as much information as possible while
# avoiding unnecessarily staging out other values.
if any(unks_in):
# The usual case (if we have any unknowns, we need to stage it out)
res = [v for v, inst in zip(eqn.invars, inst_in) if not inst]
return None, eqn, [], [], res
if saveable(primitive, *[v.aval for v in eqn.invars], **eqn.params):
# The policy is telling us we can save the debug callback.
if all(inst_in):
# If all of the inputs are instantiated, we also stage out the
# debug_callback.
return eqn, eqn, [], [], []
else:
# If any are not instantiated, we don't do any extra staging to avoid
# affecting the computation.
return eqn, None, [], [], []
# If we can't save the debug callback (thanks to the policy) we listen to
# the policy and stage out the debug callback.
return eqn, eqn, [], [], []
pe.partial_eval_jaxpr_custom_rules[debug_callback_p] = partial(
_debug_partial_eval_custom, primitive=debug_callback_p
)
@state_discharge.register_discharge_rule(debug_callback_p)
def _debug_callback_state_discharge_rule(
in_avals, out_avals, *args, effect, partitioned, callback, **params
):
del in_avals, out_avals # Unused.
out = debug_callback_p.bind(
*args, effect=effect, partitioned=partitioned, callback=callback, **params
)
return args, out
def _split_callback_args(args, kwargs):
flat_args, in_tree = tree_util.tree_flatten((args, kwargs))
static_args, dyn_args = {}, []
for i, a in enumerate(flat_args):
try:
core.shaped_abstractify(a)
dyn_args.append(a)
except (AssertionError, TypeError):
static_args[i] = a
return in_tree, dyn_args, static_args
def merge_callback_args(in_tree, dyn_args, static_args):
static_args_dict = dict(static_args)
all_args = [None] * (len(static_args) + len(dyn_args))
di = iter(dyn_args)
for i in range(len(all_args)):
if i in static_args_dict:
all_args[i] = static_args_dict[i]
else:
all_args[i] = next(di)
assert next(di, None) is None
return tree_util.tree_unflatten(in_tree, all_args)
def _make_flat_callback(in_tree, callback, static_args):
def _flat_callback(*dyn_args):
args, kwargs = merge_callback_args(in_tree, dyn_args, static_args)
callback(*args, **kwargs)
return ()
return _flat_callback
def _check_format(fmt, in_tree, dyn_args, static_args):
args, kwargs = merge_callback_args(in_tree, dyn_args, static_args)
formatter.format(fmt, *args, **kwargs)
debug_print_p = core.Primitive("debug_print")
debug_print_p.multiple_results = True
@debug_print_p.def_impl
def debug_print_impl(
*args: Any,
fmt: str,
ordered,
partitioned,
in_tree,
static_args,
np_printoptions,
has_placeholders,
logging_record,
):
callback = partial(
_format_print_callback, fmt, dict(np_printoptions), has_placeholders,
logging_record,
)
callback = _make_flat_callback(in_tree, callback, static_args)
effect = ordered_debug_effect if ordered else debug_effect
debug_callback_impl(
*args, callback=callback, effect=effect, partitioned=partitioned
)
return ()
@debug_print_p.def_effectful_abstract_eval
def debug_print_abstract_eval(*avals: Any, fmt: str, ordered, **kwargs):
del avals, fmt, kwargs # Unused.
effect = ordered_debug_effect if ordered else debug_effect
return [], {effect}
batching.primitive_batchers[debug_print_p] = partial(
debug_batching_rule, primitive=debug_print_p
)
def debug_print_jvp_rule(primals, tangents, **params):
return debug_print_p.bind(*primals, **params), []
ad.primitive_jvps[debug_print_p] = debug_print_jvp_rule
def debug_print_transpose_rule(_, *args, **kwargs):
del kwargs
return [None for _ in args]
ad.primitive_transposes[debug_print_p] = debug_print_transpose_rule
def debug_print_lowering_rule(
ctx,
*dyn_args,
fmt,
ordered,
partitioned,
in_tree,
static_args,
np_printoptions,
has_placeholders,
logging_record,
):
callback = partial(
_format_print_callback,
fmt,
dict(np_printoptions),
has_placeholders,
logging_record,
)
callback = _make_flat_callback(in_tree, callback, static_args)
effect = ordered_debug_effect if ordered else debug_effect
return debug_callback_lowering(
ctx, *dyn_args, effect=effect, partitioned=partitioned, callback=callback
)
mlir.register_lowering(debug_print_p, debug_print_lowering_rule, platform="cpu")
mlir.register_lowering(debug_print_p, debug_print_lowering_rule, platform="gpu")
mlir.register_lowering(
debug_print_p, debug_print_lowering_rule, platform="tpu", cacheable=False
)
pe.partial_eval_jaxpr_custom_rules[debug_print_p] = partial(
_debug_partial_eval_custom, primitive=debug_print_p
)
@state_discharge.register_discharge_rule(debug_print_p)
def _debug_print_state_discharge_rule(in_avals, out_avals, *args, **kwargs):
del in_avals, out_avals # Unused.
out = debug_print_p.bind(*args, **kwargs)
return args, out
def debug_callback(
callback: Callable[..., None],
*args: Any,
ordered: bool = False,
partitioned: bool = False,
**kwargs: Any,
) -> None:
"""Calls a stageable Python callback.
For more explanation, see `External Callbacks`_.
``jax.debug.callback`` enables you to pass in a Python function that can be called
inside of a staged JAX program. A ``jax.debug.callback`` follows existing JAX
transformation *pure* operational semantics, which are therefore unaware of
side-effects. This means the effect could be dropped, duplicated, or
potentially reordered in the presence of higher-order primitives and
transformations.
We want this behavior because we'd like ``jax.debug.callback`` to be "innocuous",
i.e. we want these primitives to change the JAX computation as little as
possible while revealing as much about them as possible, such as which parts
of the computation are duplicated or dropped.
Args:
callback: A Python callable returning None.
*args: The positional arguments to the callback.
ordered: A keyword only argument used to indicate whether or not the
staged out computation will enforce ordering of this callback w.r.t.
other ordered callbacks.
partitioned: If True, then print local shards only; this option avoids an
all-gather of the operands. If False, print with logical operands; this
option requires an all-gather of operands first.
**kwargs: The keyword arguments to the callback.
Returns:
None
See Also:
- :func:`jax.experimental.io_callback`: callback designed for impure functions.
- :func:`jax.pure_callback`: callback designed for pure functions.
- :func:`jax.debug.print`: callback designed for printing.
.. _External Callbacks: https://docs.jax.dev/en/latest/notebooks/external_callbacks.html
"""
if not callable(callback):
raise TypeError("first argument to jax.debug.callback must be callable, "
f"but got an object of type {type(callback)}")
in_tree, dyn_args, static_args = _split_callback_args(args, kwargs)
def _flat_callback(*dyn_args):
all_args = [None] * (len(static_args) + len(dyn_args))
di = iter(dyn_args)
for i in range(len(all_args)):
if i in static_args:
all_args[i] = static_args[i]
else:
all_args[i] = next(di)
assert next(di, None) is None
args, kwargs = tree_util.tree_unflatten(in_tree, all_args)
callback(*args, **kwargs)
return ()
effect = ordered_debug_effect if ordered else debug_effect
debug_callback_p.bind(
*dyn_args, callback=_flat_callback, effect=effect, partitioned=partitioned
)
| OrderedDebugEffect |
python | walkccc__LeetCode | solutions/2699. Modify Graph Edge Weights/2699.py | {
"start": 0,
"end": 1590
} | class ____:
def modifiedGraphEdges(self, n: int, edges: list[list[int]], source: int, destination: int, target: int) -> list[list[int]]:
MAX = 2_000_000_000
graph = [[] for _ in range(n)]
for u, v, w in edges:
if w == -1:
continue
graph[u].append((v, w))
graph[v].append((u, w))
distToDestination = self._dijkstra(graph, source, destination)
if distToDestination < target:
return []
if distToDestination == target:
# Change the weights of negative edges to an impossible value.
for edge in edges:
if edge[2] == -1:
edge[2] = MAX
return edges
for i, (u, v, w) in enumerate(edges):
if w != -1:
continue
edges[i][2] = 1
graph[u].append((v, 1))
graph[v].append((u, 1))
distToDestination = self._dijkstra(graph, source, destination)
if distToDestination <= target:
edges[i][2] += target - distToDestination
# Change the weights of negative edges to an impossible value.
for j in range(i + 1, len(edges)):
if edges[j][2] == -1:
edges[j][2] = MAX
return edges
return []
def _dijkstra(self, graph: list[list[int]], src: int, dst: int) -> int:
dist = [math.inf] * len(graph)
dist[src] = 0
minHeap = [(dist[src], src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
for v, w in graph[u]:
if d + w < dist[v]:
dist[v] = d + w
heapq.heappush(minHeap, (dist[v], v))
return dist[dst]
| Solution |
python | sanic-org__sanic | sanic/http/http3.py | {
"start": 1480,
"end": 2224
} | class ____(TransportProtocol):
"""HTTP/3 transport implementation."""
__slots__ = ("_protocol",)
def __init__(self, protocol: Http3Protocol):
self._protocol = protocol
def get_protocol(self) -> Http3Protocol:
return self._protocol
def get_extra_info(self, info: str, default: Any = None) -> Any:
if (
info in ("socket", "sockname", "peername")
and self._protocol._transport
):
return self._protocol._transport.get_extra_info(info, default)
elif info == "network_paths":
return self._protocol._quic._network_paths
elif info == "ssl_context":
return self._protocol.app.state.ssl
return default
| HTTP3Transport |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/bump_version.py | {
"start": 4501,
"end": 5930
} | class ____(SetConnectorVersion):
def __init__(self, context: ConnectorContext, connector_directory: dagger.Directory, bump_type: str, rc: bool = False) -> None:
self.bump_type = bump_type
new_version = self.get_bumped_version(context.connector.version, bump_type, rc)
super().__init__(
context,
connector_directory,
new_version,
)
@property
def title(self) -> str:
return f"{self.bump_type.upper()} bump {self.context.connector.technical_name} version to {self.new_version}"
@staticmethod
def get_bumped_version(version: str | None, bump_type: str, rc: bool) -> str:
if version is None:
raise ValueError("Version is not set")
current_version = semver.VersionInfo.parse(version)
if bump_type in BUMP_VERSION_METHOD_MAPPING:
new_version = BUMP_VERSION_METHOD_MAPPING[bump_type](current_version)
if rc:
new_version = new_version.bump_prerelease()
elif bump_type.startswith("version:"):
version_str = bump_type.split("version:", 1)[1]
if semver.VersionInfo.is_valid(version_str):
return version_str
else:
raise ValueError(f"Invalid version: {version_str}")
else:
raise ValueError(f"Unknown bump type: {bump_type}")
return str(new_version)
| BumpConnectorVersion |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v3_checkpoint_adapter.py | {
"start": 14590,
"end": 20993
} | class ____(
checkpoint_adapter.AbstractCheckpointAdapter
):
"""Adapter for TPU Embedding V3 to handle checkpoint resharding."""
def __init__(
self,
layouts: Optional[sparse_core_layout_pb2.SparseCoreTableLayouts] = None,
):
"""An adapter for TPUEmbeddingV3 checkpoints.
Constructs an adapter for TPUEmbeddingV3 to handle layout changes. between
checkpoint values and embedding object being restored.
Args:
layouts: The target layouts required.
"""
self._checkpoint_layouts = {}
self._checkpoint_to_reshard_callback = {}
if layouts:
for layout in layouts.tables:
self._checkpoint_layouts[layout.table_name] = layout
@classmethod
def create_from_checkpoint(cls, save_path: str):
reader = py_checkpoint_reader.NewCheckpointReader(save_path)
sparsecore_layouts_str = None
for name in reader.get_variable_to_dtype_map():
if tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY in name:
sparsecore_layouts_str = reader.get_tensor(name)
break
if sparsecore_layouts_str is None:
return cls(None)
layouts = sparse_core_layout_pb2.SparseCoreTableLayouts()
layouts.ParseFromString(sparsecore_layouts_str)
logging.info("Loaded layouts from checkpoint: %s", layouts)
return cls(layouts)
def initialize_reshard_callbacks(
self,
embedding_layouts: Optional[
Mapping[str, sparse_core_layout_pb2.SparseCoreTableLayout]
] = None,
):
if not self._checkpoint_layouts and embedding_layouts:
# From Unsharded to Sharded
stacked_name_to_table_names = collections.defaultdict(list)
for layout in embedding_layouts.values():
stacked_name_to_table_names[layout.stacked_table_name].append(layout)
for stacked_name, layouts in stacked_name_to_table_names.items():
# Make the first table name as the key for checkpoint position
# The sorting here is by the position of the logical table in the shard
sorted_layouts = sorted(
layouts, key=lambda layout: layout.sparse_core_shard_row_offset
)
logging.info("Creating resharding plan for %s", stacked_name)
self._checkpoint_to_reshard_callback[sorted_layouts[0].table_name] = (
EmbeddingUnshardToShardCallback(
stacked_name,
[l.table_name for l in sorted_layouts],
sorted_layouts,
None,
)
)
return
if not embedding_layouts:
# TODO(b/326644306): From sharded to unsharded
raise NotImplementedError("Sharded to Unsharded is not implemented yet.")
# Reshard to different SC Layout
from_layouts = _reorg_layouts(list(self._checkpoint_layouts.values()))
to_layouts = _reorg_layouts(list(embedding_layouts.values()))
for stacked_name, table_layouts in to_layouts.items():
# look for required stacked tables
required_stacked_tables = dict()
for table_layout in table_layouts:
for from_stacked_name, from_table_layouts in from_layouts.items():
if table_layout.table_name in {
layout.table_name for layout in from_table_layouts
}:
required_stacked_tables[from_stacked_name] = from_table_layouts
logging.info(
"Creating resharding plan for %s, required stacked_tables: %s",
stacked_name,
", ".join(required_stacked_tables.keys()),
)
self._checkpoint_to_reshard_callback[stacked_name] = (
EmbeddingReshardCallback(
object_local_name=stacked_name,
from_shard_layouts=required_stacked_tables,
to_shard_layouts=to_layouts[stacked_name],
)
)
def is_layouts_same(self, embedding_layouts) -> bool:
    """Report whether checkpoint layouts exactly match the embedding layouts.

    Args:
      embedding_layouts: dict of layouts for embedding tables.

    Raises:
      ValueError: if the embedding layouts and checkpoint layouts do not have
        the same keys.

    Returns:
      True when every layout proto stored in the checkpoint equals the
      corresponding embedding layout, False otherwise.
    """
    checkpoint_keys = self._checkpoint_layouts.keys()
    if checkpoint_keys != embedding_layouts.keys():
        raise ValueError(
            "Layouts in checkpoint and embedding must have the same keys. found"
            " {} and {}".format(checkpoint_keys, embedding_layouts.keys())
        )
    for table_name in checkpoint_keys:
        checkpoint_layout = self._checkpoint_layouts[table_name]
        target_layout = embedding_layouts[table_name]
        if compare.ProtoEq(checkpoint_layout, target_layout):
            continue
        # Log the first mismatch so the triggered reshard is explainable.
        logging.info(
            "Layouts do not match for %s this will require resharding; %s"
            " vs %s",
            table_name,
            checkpoint_layout,
            target_layout,
        )
        return False
    return True
def is_applicable(self, trackable: trackable_base.Trackable) -> bool:
    """Decide whether this adapter must reshard values restored into ``trackable``.

    Side effect: when resharding is required, the reshard callbacks are
    initialized before returning True.
    """
    # issubclass(trackable, TPUEmbeddingBase) adds circular deps, hence using
    # a workaround to select the applicable embedding implementations.
    allowed_class_names = [".TPUEmbeddingV2Plus", ".TPUEmbeddingV2"]
    trackable_type_name = str(type(trackable))
    if all(marker not in trackable_type_name for marker in allowed_class_names):
        return False
    embedding_layouts = getattr(trackable, "embedding_layouts", None)
    # Neither checkpoint not target embedding has layout, no resharding needed.
    if not self._checkpoint_layouts and not embedding_layouts:
        logging.info("No resharding needed, no layouts")
        return False
    # Only if both checkpoint and embedding have layouts and they match,
    # no resharding needed.
    if (
        self._checkpoint_layouts
        and embedding_layouts
        and self.is_layouts_same(embedding_layouts)
    ):
        logging.info("No resharding needed; layouts match")
        return False
    # Else we need to reshard.
    self.initialize_reshard_callbacks(embedding_layouts)
    return True
def get_reshard_callback(
    self, name: str
) -> Optional[checkpoint_adapter.ReshardCallback]:
    """Look up the reshard callback registered for ``name``.

    Slot variables are named ``"<var>/<slot>"``; when no callback is
    registered under the full name, fall back to the base variable name so
    slots resolve to their parent table's callback. Returns None when no
    callback applies.
    """
    callbacks = self._checkpoint_to_reshard_callback
    if name in callbacks:
        return callbacks[name]
    base_name, _, _ = name.partition("/")
    return callbacks.get(base_name)
| TpuEmbeddingV3CheckpointAdapter |
python | ApeWorX__ape | src/ape/types/basic.py | {
"start": 699,
"end": 2260
class ____(Sequence[_T]):
    """A ``Sequence`` that lazily materializes items from an iterator.

    Items are pulled from the wrapped iterator only when first needed and
    memoized in ``self.cache``, so repeated access never re-consumes the
    source. The source may be given either as an iterator or as a zero-arg
    callable returning one (deferred construction).
    """

    def __init__(self, generator: Union[Iterator[_T], Callable[[], Iterator[_T]]]):
        self._generator = generator
        # Items consumed from the source so far, in order.
        self.cache: list = []

    @overload
    def __getitem__(self, index: int) -> _T: ...

    @overload
    def __getitem__(self, index: slice) -> Sequence[_T]: ...

    def __getitem__(self, index: Union[int, slice]) -> Union[_T, Sequence[_T]]:
        if isinstance(index, int):
            while len(self.cache) <= index:
                # Catch up the cache. Catching StopIteration (instead of the
                # previous ``next(gen, None)`` truthiness test) correctly
                # caches falsy items (0, None, "") and raises IndexError
                # rather than looping forever when the source is exhausted
                # before ``index`` is reached.
                try:
                    self.cache.append(next(self.generator))
                except StopIteration:
                    raise IndexError(index) from None
            # NOTE: a negative index only looks at what is already cached,
            # matching the original behavior.
            return self.cache[index]
        elif isinstance(index, slice):
            # TODO: Make slices lazier. Right now, it deqeues all.
            for item in self.generator:
                self.cache.append(item)
            return self.cache[index]
        else:
            raise TypeError("Index must be int or slice.")

    def __len__(self) -> int:
        # NOTE: This will deque everything.
        for value in self.generator:
            self.cache.append(value)
        return len(self.cache)

    def __iter__(self) -> Iterator[_T]:
        yield from self.cache
        for value in self.generator:
            yield value
            self.cache.append(value)

    @property
    def generator(self) -> Iterator:
        """The underlying iterator, constructing it on first use if callable."""
        if callable(self._generator):
            self._generator = self._generator()

        assert isinstance(self._generator, Iterator)  # For type-checking.
        yield from self._generator
| _LazySequence |
python | Textualize__textual | examples/merlin.py | {
"start": 885,
"end": 1505
class ____(Widget):
    """Switch with a numeric label."""

    DEFAULT_CSS = """
    LabelSwitch Label {
        text-align: center;
        width: 1fr;
        text-style: bold;
    }

    LabelSwitch Label#label-5 {
        color: $text-disabled;
    }
    """

    def __init__(self, switch_no: int) -> None:
        # Record the number before Widget.__init__ runs, mirroring the
        # original initialization order.
        self.switch_no = switch_no
        super().__init__()

    def compose(self) -> ComposeResult:
        """Yield the numeric label followed by its switch."""
        number = self.switch_no
        yield Label(str(number), id=f"label-{number}")
        yield Switch(id=f"switch-{number}", name=str(number))
| LabelSwitch |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/containers/column.py | {
"start": 1102,
"end": 16712
class ____:
    """An immutable column with sortedness metadata."""

    obj: plc.Column
    is_sorted: plc.types.Sorted
    order: plc.types.Order
    null_order: plc.types.NullOrder
    is_scalar: bool
    # Optional name, only ever set by evaluation of NamedExpr nodes
    # The internal evaluation should not care about the name.
    name: str | None
    dtype: DataType

    def __init__(
        self,
        column: plc.Column,
        dtype: DataType,
        *,
        is_sorted: plc.types.Sorted = plc.types.Sorted.NO,
        order: plc.types.Order = plc.types.Order.ASCENDING,
        null_order: plc.types.NullOrder = plc.types.NullOrder.BEFORE,
        name: str | None = None,
    ):
        self.obj = column
        self.is_scalar = self.size == 1
        self.name = name
        self.dtype = dtype
        self.set_sorted(is_sorted=is_sorted, order=order, null_order=null_order)

    @classmethod
    def deserialize(
        cls,
        header: ColumnHeader,
        frames: tuple[memoryview[bytes], plc.gpumemoryview],
        stream: Stream,
    ) -> Self:
        """
        Create a Column from a serialized representation returned by `.serialize()`.

        Parameters
        ----------
        header
            The (unpickled) metadata required to reconstruct the object.
        frames
            Two-tuple of frames (a memoryview and a gpumemoryview).
        stream
            CUDA stream used for device memory operations and kernel launches
            on this column. The caller is responsible for ensuring that
            the data in ``frames`` is valid on ``stream``.

        Returns
        -------
        Column
            The deserialized Column.
        """
        packed_metadata, packed_gpu_data = frames
        (plc_column,) = plc.contiguous_split.unpack_from_memoryviews(
            packed_metadata, packed_gpu_data, stream
        ).columns()
        return cls(plc_column, **cls.deserialize_ctor_kwargs(header["column_kwargs"]))

    @staticmethod
    def deserialize_ctor_kwargs(
        column_kwargs: ColumnOptions,
    ) -> DeserializedColumnOptions:
        """Deserialize the constructor kwargs for a Column."""
        return {
            "is_sorted": column_kwargs["is_sorted"],
            "order": column_kwargs["order"],
            "null_order": column_kwargs["null_order"],
            "name": column_kwargs["name"],
            "dtype": DataType(_dtype_from_header(column_kwargs["dtype"])),
        }

    def serialize(
        self,
        stream: Stream,
    ) -> tuple[ColumnHeader, tuple[memoryview[bytes], plc.gpumemoryview]]:
        """
        Serialize the Column into header and frames.

        Follows the Dask serialization scheme with a picklable header (dict) and
        a tuple of frames (in this case a contiguous host and device buffer).

        To enable dask support, dask serializers must be registered

            >>> from cudf_polars.experimental.dask_serialize import register
            >>> register()

        Returns
        -------
        header
            A dict containing any picklable metadata required to reconstruct the object.
        frames
            Two-tuple of frames suitable for passing to `plc.contiguous_split.unpack_from_memoryviews`
        """
        packed = plc.contiguous_split.pack(plc.Table([self.obj]), stream=stream)
        header: ColumnHeader = {
            "column_kwargs": self.serialize_ctor_kwargs(),
            "frame_count": 2,
        }
        return header, packed.release()

    def serialize_ctor_kwargs(self) -> ColumnOptions:
        """Serialize the constructor kwargs for self."""
        return {
            "is_sorted": self.is_sorted,
            "order": self.order,
            "null_order": self.null_order,
            "name": self.name,
            "dtype": _dtype_to_header(self.dtype.polars_type),
        }

    def obj_scalar(self, stream: Stream) -> plc.Scalar:
        """
        A copy of the column object as a pylibcudf Scalar.

        Parameters
        ----------
        stream
            CUDA stream used for device memory operations and kernel launches.
            ``self.obj`` must be valid on this stream, and the result will be
            valid on this stream.

        Returns
        -------
        pylibcudf Scalar object.

        Raises
        ------
        ValueError
            If the column is not length-1.
        """
        if not self.is_scalar:
            raise ValueError(f"Cannot convert a column of length {self.size} to scalar")
        return plc.copying.get_element(self.obj, 0, stream=stream)

    def rename(self, name: str | None, /) -> Self:
        """
        Return a shallow copy with a new name.

        Parameters
        ----------
        name
            New name

        Returns
        -------
        Shallow copy of self with new name set.
        """
        new = self.copy()
        new.name = name
        return new

    def sorted_like(self, like: Column, /) -> Self:
        """
        Return a shallow copy with sortedness from like.

        Parameters
        ----------
        like
            The column to copy sortedness metadata from.

        Returns
        -------
        Shallow copy of self with metadata set.

        See Also
        --------
        set_sorted, copy_metadata
        """
        return type(self)(
            self.obj,
            name=self.name,
            dtype=self.dtype,
            is_sorted=like.is_sorted,
            order=like.order,
            null_order=like.null_order,
        )

    def check_sorted(
        self,
        *,
        order: plc.types.Order,
        null_order: plc.types.NullOrder,
        stream: Stream,
    ) -> bool:
        """
        Check if the column is sorted.

        Parameters
        ----------
        order
            The requested sort order.
        null_order
            Where nulls sort to.
        stream
            CUDA stream used for device memory operations and kernel launches
            on this Column. The data in ``self.obj`` must be valid on this stream.

        Returns
        -------
        True if the column is sorted, false otherwise.

        Notes
        -----
        If the sortedness flag is not set, this launches a kernel to
        check sortedness.
        """
        if self.size <= 1 or self.size == self.null_count:
            return True
        if self.is_sorted == plc.types.Sorted.YES:
            return self.order == order and (
                self.null_count == 0 or self.null_order == null_order
            )
        if plc.sorting.is_sorted(
            plc.Table([self.obj]), [order], [null_order], stream=stream
        ):
            # Cache the discovered sortedness on ``is_sorted`` (previously
            # this wrote to a never-read ``self.sorted`` attribute, so the
            # kernel result was never memoized).
            self.is_sorted = plc.types.Sorted.YES
            self.order = order
            self.null_order = null_order
            return True
        return False

    def astype(self, dtype: DataType, stream: Stream) -> Column:
        """
        Cast the column to as the requested dtype.

        Parameters
        ----------
        dtype
            Datatype to cast to.
        stream
            CUDA stream used for device memory operations and kernel launches
            on this Column. The data in ``self.obj`` must be valid on this stream.

        Returns
        -------
        Column of requested type.

        Raises
        ------
        RuntimeError
            If the cast is unsupported.

        Notes
        -----
        This only produces a copy if the requested dtype doesn't match
        the current one.
        """
        plc_dtype = dtype.plc_type
        if self.obj.type() == plc_dtype:
            return self

        if (
            plc_dtype.id() == plc.TypeId.STRING
            or self.obj.type().id() == plc.TypeId.STRING
        ):
            return Column(
                self._handle_string_cast(plc_dtype, stream=stream), dtype=dtype
            )
        elif plc.traits.is_integral_not_bool(
            self.obj.type()
        ) and plc.traits.is_timestamp(plc_dtype):
            # int -> timestamp: widen to INT64 then reinterpret the buffer
            # as the timestamp type (no value conversion kernel needed).
            upcasted = plc.unary.cast(
                self.obj, plc.DataType(plc.TypeId.INT64), stream=stream
            )
            plc_col = plc.column.Column(
                plc_dtype,
                upcasted.size(),
                upcasted.data(),
                upcasted.null_mask(),
                upcasted.null_count(),
                upcasted.offset(),
                upcasted.children(),
            )
            return Column(plc_col, dtype=dtype).sorted_like(self)
        elif plc.traits.is_integral_not_bool(plc_dtype) and plc.traits.is_timestamp(
            self.obj.type()
        ):
            # timestamp -> int: reinterpret as INT64, then cast to the target.
            plc_col = plc.column.Column(
                plc.DataType(plc.TypeId.INT64),
                self.obj.size(),
                self.obj.data(),
                self.obj.null_mask(),
                self.obj.null_count(),
                self.obj.offset(),
                self.obj.children(),
            )
            return Column(
                plc.unary.cast(plc_col, plc_dtype, stream=stream), dtype=dtype
            ).sorted_like(self)
        else:
            result = Column(
                plc.unary.cast(self.obj, plc_dtype, stream=stream), dtype=dtype
            )
            if is_order_preserving_cast(self.obj.type(), plc_dtype):
                return result.sorted_like(self)
            return result

    def _handle_string_cast(self, dtype: plc.DataType, stream: Stream) -> plc.Column:
        # Casts to/from STRING go through the strings convert APIs; numeric
        # parsing is validated up front so bad input raises rather than
        # producing nulls.
        if dtype.id() == plc.TypeId.STRING:
            if is_floating_point(self.obj.type()):
                return from_floats(self.obj, stream=stream)
            else:
                return from_integers(self.obj, stream=stream)
        else:
            if is_floating_point(dtype):
                floats = is_float(self.obj, stream=stream)
                if not plc.reduce.reduce(
                    floats,
                    plc.aggregation.all(),
                    plc.DataType(plc.TypeId.BOOL8),
                    stream=stream,
                ).to_py(stream=stream):
                    raise InvalidOperationError("Conversion from `str` failed.")
                return to_floats(self.obj, dtype)
            else:
                integers = is_integer(self.obj, stream=stream)
                if not plc.reduce.reduce(
                    integers,
                    plc.aggregation.all(),
                    plc.DataType(plc.TypeId.BOOL8),
                    stream=stream,
                ).to_py(stream=stream):
                    raise InvalidOperationError("Conversion from `str` failed.")
                return to_integers(self.obj, dtype, stream=stream)

    def copy_metadata(self, from_: pl_Series, /) -> Self:
        """
        Copy metadata from a host series onto self.

        Parameters
        ----------
        from_
            Polars series to copy metadata from

        Returns
        -------
        Self with metadata set.

        See Also
        --------
        set_sorted, sorted_like
        """
        self.name = from_.name
        if len(from_) <= 1:
            return self
        ascending = from_.flags["SORTED_ASC"]
        descending = from_.flags["SORTED_DESC"]
        if ascending or descending:
            has_null_first = from_.item(0) is None
            has_null_last = from_.item(-1) is None
            order = (
                plc.types.Order.ASCENDING if ascending else plc.types.Order.DESCENDING
            )
            null_order = plc.types.NullOrder.BEFORE
            if (descending and has_null_first) or (ascending and has_null_last):
                null_order = plc.types.NullOrder.AFTER
            return self.set_sorted(
                is_sorted=plc.types.Sorted.YES,
                order=order,
                null_order=null_order,
            )
        return self

    def set_sorted(
        self,
        *,
        is_sorted: plc.types.Sorted,
        order: plc.types.Order,
        null_order: plc.types.NullOrder,
    ) -> Self:
        """
        Modify sortedness metadata in place.

        Parameters
        ----------
        is_sorted
            Is the column sorted
        order
            The order if sorted
        null_order
            Where nulls sort, if sorted

        Returns
        -------
        Self with metadata set.
        """
        if self.size <= 1:
            # Length-0/1 columns are trivially sorted.
            is_sorted = plc.types.Sorted.YES
        self.is_sorted = is_sorted
        self.order = order
        self.null_order = null_order
        return self

    def copy(self) -> Self:
        """
        A shallow copy of the column.

        Returns
        -------
        New column sharing data with self.
        """
        return type(self)(
            self.obj,
            is_sorted=self.is_sorted,
            order=self.order,
            null_order=self.null_order,
            name=self.name,
            dtype=self.dtype,
        )

    def mask_nans(self, stream: Stream) -> Self:
        """Return a shallow copy of self with nans masked out."""
        if plc.traits.is_floating_point(self.obj.type()):
            old_count = self.null_count
            mask, new_count = plc.transform.nans_to_nulls(self.obj, stream=stream)
            result = type(self)(self.obj.with_mask(mask, new_count), self.dtype)
            if old_count == new_count:
                # No new nulls introduced, so sortedness is preserved.
                return result.sorted_like(self)
            return result
        return self.copy()

    def nan_count(self, stream: Stream) -> int:
        """
        Return the number of NaN values in the column.

        Parameters
        ----------
        stream
            CUDA stream used for device memory operations and kernel launches.
            ``self.obj`` must be valid on this stream, and the result will be
            valid on this stream.

        Returns
        -------
        Number of NaN values in the column.
        """
        result: int
        if self.size > 0 and plc.traits.is_floating_point(self.obj.type()):
            # See https://github.com/rapidsai/cudf/issues/20202 for why we
            # type: ignore
            result = plc.reduce.reduce(  # type: ignore[assignment]
                plc.unary.is_nan(self.obj, stream=stream),
                plc.aggregation.sum(),
                plc.types.SIZE_TYPE,
                stream=stream,
            ).to_py(stream=stream)
        else:
            result = 0
        return result

    @property
    def size(self) -> int:
        """Return the size of the column."""
        return self.obj.size()

    @property
    def null_count(self) -> int:
        """Return the number of Null values in the column."""
        return self.obj.null_count()

    def slice(self, zlice: Slice | None, stream: Stream) -> Self:
        """
        Slice a column.

        Parameters
        ----------
        zlice
            optional, tuple of start and length, negative values of start
            treated as for python indexing. If not provided, returns self.
        stream
            CUDA stream used for device memory operations and kernel launches
            on this Column. The data in ``self.obj`` must be valid on this stream.

        Returns
        -------
        New column (if zlice is not None) otherwise self (if it is)
        """
        if zlice is None:
            return self
        (table,) = plc.copying.slice(
            plc.Table([self.obj]),
            conversion.from_polars_slice(zlice, num_rows=self.size),
            stream=stream,
        )
        (column,) = table.columns()
        return type(self)(column, name=self.name, dtype=self.dtype).sorted_like(self)
| Column |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/llama_index/postprocessor/nvidia_rerank/utils.py | {
"start": 246,
"end": 3716
class ____:
    """
    Model information.

    id: unique identifier for the model, passed as model parameter for requests
    model_type: API type (ranking)
    client: client name, e.g. NVIDIARerank
    endpoint: custom endpoint for the model
    aliases: list of aliases for the model

    All aliases are deprecated and will trigger a warning when used.
    """

    id: str
    model_type: Optional[str] = "ranking"
    client: str = "NVIDIARerank"
    endpoint: Optional[str] = None
    aliases: Optional[list] = None
    base_model: Optional[str] = None
    supports_tools: Optional[bool] = False
    supports_structured_output: Optional[bool] = False

    def __hash__(self) -> int:
        # Models hash by their unique id.
        return hash(self.id)

    def validate(self):
        """Ensure ``model_type`` is compatible with ``client``; return hash(id)."""
        if self.client:
            # Per-client whitelist of supported API types.
            compatible_types = {"NVIDIARerank": ("ranking",)}[self.client]
            if self.model_type not in compatible_types:
                raise ValueError(
                    f"Model type '{self.model_type}' not supported by client '{self.client}'"
                )
        return hash(self.id)
# Known NVIDIA ranking models, keyed by canonical model id. Names listed in
# ``aliases`` are deprecated and resolve to the same entry (see lookup_model).
RANKING_MODEL_TABLE = {
    "nv-rerank-qa-mistral-4b:1": Model(
        id="nv-rerank-qa-mistral-4b:1",
        model_type="ranking",
        client="NVIDIARerank",
        endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking",
        aliases=["ai-rerank-qa-mistral-4b"],
    ),
    "nvidia/nv-rerankqa-mistral-4b-v3": Model(
        id="nvidia/nv-rerankqa-mistral-4b-v3",
        model_type="ranking",
        client="NVIDIARerank",
        endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking",
    ),
    "nvidia/llama-3.2-nv-rerankqa-1b-v1": Model(
        id="nvidia/llama-3.2-nv-rerankqa-1b-v1",
        model_type="ranking",
        client="NVIDIARerank",
        endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v1/reranking",
    ),
    "nvidia/llama-3.2-nv-rerankqa-1b-v2": Model(
        id="nvidia/llama-3.2-nv-rerankqa-1b-v2",
        model_type="ranking",
        client="NVIDIARerank",
        endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking",
    ),
}
def lookup_model(name: str) -> Optional[Model]:
    """
    Lookup a model by name, using only the table of known models.

    The name is either:
        - directly in the table
        - an alias in the table
        - not found (None)

    Callers can check to see if the name was an alias by
    comparing the result's id field to the name they provided.
    """
    model = RANKING_MODEL_TABLE.get(name)
    if not model:
        # Fall back to scanning the (deprecated) alias lists.
        model = next(
            (
                candidate
                for candidate in RANKING_MODEL_TABLE.values()
                if candidate.aliases and name in candidate.aliases
            ),
            None,
        )
    return model
def determine_model(name: str) -> Optional[Model]:
    """
    Determine the model to use based on a name, using
    only the table of known models.

    Raise a warning if the model is found to be
    an alias of a known model.

    If the model is not found, return None.
    """
    model = lookup_model(name)
    if model and model.id != name:
        # name resolved through an alias; all aliases are deprecated
        warn_msg = f"Model {name} is deprecated. Using {model.id} instead."
        warnings.warn(warn_msg, UserWarning, stacklevel=1)
    return model
# Endpoints recognized as hosted NVIDIA API bases (BASE_URL is defined above).
KNOWN_URLS = [
    BASE_URL,
    "https://ai.api.nvidia.com/v1/retrieval/snowflake/arctic-embed-l",
]
| Model |
python | huggingface__transformers | src/transformers/models/janus/modular_janus.py | {
"start": 54307,
"end": 77700
} | class ____(BlipImageProcessor):
r"""
Constructs a JANUS image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
min_size (`int`, *optional*, defaults to 14):
The minimum allowed size for the resized image. Ensures that neither the height nor width
falls below this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to square or not.
"""
# Typed kwargs accepted by ``preprocess`` (see JanusImageProcessorKwargs).
valid_kwargs = JanusImageProcessorKwargs

def __init__(
    self,
    do_resize: bool = True,
    size: Optional[dict[str, int]] = None,
    min_size: int = 14,
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    do_convert_rgb: Optional[bool] = None,
    do_pad: Optional[bool] = True,
    **kwargs,
):
    """Initialize the Janus image processor.

    Only ``do_pad``, ``min_size`` and the padding color derived from
    ``image_mean`` are handled here; ``**kwargs`` are forwarded to
    ``BlipImageProcessor.__init__``.

    NOTE(review): the explicitly named parameters other than ``do_pad``,
    ``min_size`` and ``image_mean`` (e.g. ``do_resize``, ``size``,
    ``resample``) are not used and not forwarded to ``super().__init__`` —
    confirm against the generated (non-modular) processor that the base
    class supplies the intended defaults, since values passed via these
    named parameters are otherwise dropped.
    """
    super().__init__(**kwargs)
    self.do_pad = do_pad
    self.min_size = min_size
    if image_mean is None:
        # Mid-gray padding when no normalization mean is given.
        self.background_color = (127, 127, 127)
    else:
        # Scale the (0-1) normalization mean up to 0-255 channel values.
        self.background_color = tuple(int(x * 255) for x in image_mean)
def pad_to_square(
    self,
    image: np.ndarray,
    background_color: Union[int, tuple[int, int, int]] = 0,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
    """
    Pads an image to a square based on the longest edge.

    Args:
        image (`np.ndarray`):
            The image to pad.
        background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
            The color to use for the padding. Can be an integer for single channel or a
            tuple of integers representing for multi-channel images. If passed as integer
            in multi-channel mode, it will default to `0` in subsequent channels.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use same as the input image.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

    Returns:
        `np.ndarray`: The padded image.
    """
    height, width = get_image_size(image, input_data_format)
    # Channel count position depends on the input layout.
    num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1]

    if height == width:
        # Already square: no padding needed, just honor the requested layout.
        image = (
            to_channel_dimension_format(image, data_format, input_data_format)
            if data_format is not None
            else image
        )
        return image

    max_dim = max(height, width)

    # Ensure background_color is the correct shape
    if isinstance(background_color, int):
        # NOTE: an int background only fills channel 0 here; remaining
        # channels stay 0 (documented behavior, see docstring).
        background_color = [background_color]
    elif len(background_color) != num_channels:
        raise ValueError(
            f"background_color must have no more than {num_channels} elements to match the number of channels"
        )

    if input_data_format == ChannelDimension.FIRST:
        # Channels-first: fill each channel plane with its background value,
        # then paste the image centered along the shorter axis.
        result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype)
        for i, color in enumerate(background_color):
            result[i, :, :] = color
        if width > height:
            start = (max_dim - height) // 2
            result[:, start : start + height, :] = image
        else:
            start = (max_dim - width) // 2
            result[:, :, start : start + width] = image
    else:
        # Channels-last equivalent of the branch above.
        result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype)
        for i, color in enumerate(background_color):
            result[:, :, i] = color
        if width > height:
            start = (max_dim - height) // 2
            result[start : start + height, :, :] = image
        else:
            start = (max_dim - width) // 2
            result[:, start : start + width] = image

    return result
def resize(
    self,
    image: np.ndarray,
    size: Union[dict[str, int], int],
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Resize an image to dynamically calculated size.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`dict[str, int]` or `int`):
            The size to resize the image to. If a dictionary, it should have the keys `"height"` and `"width"`.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
        data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the output image. If unset, the channel dimension format of the input
            image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `None`: will be inferred from input
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. If unset, the channel dimension format is inferred
            from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

    Returns:
        `np.ndarray`: The resized image.
    """
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(image)
    height, width = get_image_size(image, input_data_format)
    max_size = max(height, width)

    # Only square targets are supported; reduce the dict to a single int.
    size = get_size_dict(size, default_to_square=True)
    if size["height"] != size["width"]:
        raise ValueError(
            f"Output height and width must be the same. Got height={size['height']} and width={size['width']}"
        )
    size = size["height"]

    # Scale so the longest side becomes `size`, preserving aspect ratio;
    # clamp both sides to `self.min_size` so neither collapses.
    delta = size / max_size
    # Largest side becomes `size` and the other side is scaled according to the aspect ratio.
    output_size_nonpadded = [
        max(int(height * delta), self.min_size),
        max(int(width * delta), self.min_size),
    ]

    image = resize(
        image,
        size=output_size_nonpadded,
        resample=resample,
        data_format=data_format,
        input_data_format=input_data_format,
        **kwargs,
    )
    return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
do_convert_rgb: Optional[bool] = None,
background_color: Optional[Union[int, tuple[int, int, int]]] = None,
do_pad: Optional[bool] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
background_color (`tuple[int, int, int]`):
The background color to use for the padding.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to square or not.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
background_color = background_color if background_color is not None else self.background_color
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_pad:
# Expand and pad the images to obtain a square image of dimensions `size x size`
images = [
self.pad_to_square(
image=image,
background_color=background_color,
input_data_format=input_data_format,
)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
return encoded_outputs
def postprocess(
    self,
    images: ImageInput,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[list[float]] = None,
    image_std: Optional[list[float]] = None,
    input_data_format: Optional[str] = None,
    return_tensors: Optional[str] = None,
):
    """Applies post-processing to the decoded image tokens by reversing transformations applied during preprocessing.

    Args:
        images: Decoded image(s) to post-process.
        do_rescale: Whether to rescale pixel values. Defaults to the processor setting.
        rescale_factor: Factor used for rescaling. Defaults to the inverse of the
            preprocessing factor (``1.0 / self.rescale_factor``) so the rescale is undone.
        do_normalize: Whether to undo normalization. Defaults to the processor setting.
        image_mean: Mean used to unnormalize. Defaults to the processor setting.
        image_std: Standard deviation used to unnormalize. Defaults to the processor setting.
        input_data_format: Channel dimension format of the input images; inferred when unset.
        return_tensors: Requested output type; ``"PIL.Image.Image"`` yields PIL images.
    """
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    # The default rescale factor is inverted relative to preprocessing because
    # postprocessing reverses the preprocessing rescale.
    rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    images = make_flat_list_of_images(images)  # Ensures input is a list
    # Inputs that are already PIL images need no further processing.
    if isinstance(images[0], PIL.Image.Image):
        return images if len(images) > 1 else images[0]

    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(images[0])  # Determine format dynamically

    pixel_values = []
    for image in images:
        image = to_numpy_array(image)  # Ensure NumPy format
        if do_normalize:
            image = self.unnormalize(
                image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format
            )
        if do_rescale:
            image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
            image = image.clip(0, 255).astype(np.uint8)
        # Only convert to PIL when both transforms were undone and PIL output was requested.
        if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
            image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format)
            image = PIL.Image.fromarray(image)
        pixel_values.append(image)

    data = {"pixel_values": pixel_values}
    # BatchFeature cannot hold PIL images as tensors, so drop the tensor type in that case.
    return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
    return BatchFeature(data=data, tensor_type=return_tensors)
def unnormalize(
    self,
    image: np.ndarray,
    image_mean: Union[float, Iterable[float]],
    image_std: Union[float, Iterable[float]],
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
    """Reverse a per-channel normalization of `image`.

    Computes ``image = (image * image_std) + image_mean`` by delegating to
    ``self.normalize`` with the algebraically inverted parameters
    ``mean' = -mean / std`` and ``std' = 1 / std``.

    Args:
        image (`np.ndarray`): Pixel values to unnormalize.
        image_mean (`float` or `Iterable[float]`): Mean used during normalization,
            a scalar or one value per channel.
        image_std (`float` or `Iterable[float]`): Standard deviation used during
            normalization, a scalar or one value per channel.
        input_data_format (`ChannelDimension` or `str`, *optional*): Channel
            dimension format of the input image; inferred when unset.

    Raises:
        ValueError: If an iterable mean/std does not have exactly 3 elements.
    """
    num_channels = 3

    def _per_channel(values, label):
        # Broadcast scalars to one value per channel; iterables must already
        # match the channel count.
        if isinstance(values, Iterable):
            if len(values) != num_channels:
                raise ValueError(
                    f"{label} must have {num_channels} elements if it is an iterable, got {len(values)}"
                )
            return values
        return [values] * num_channels

    image_mean = _per_channel(image_mean, "mean")
    image_std = _per_channel(image_std, "std")

    # Inverted affine parameters: normalize(x, -m/s, 1/s) == x * s + m.
    rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std))
    rev_image_std = tuple(1 / std for std in image_std)
    return self.normalize(
        image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format
    )
# Public API of this module: names re-exported for `from ... import *`
# and the top-level package namespace.
__all__ = [
    "JanusImageProcessor",
    "JanusPreTrainedModel",
    "JanusForConditionalGeneration",
    "JanusModel",
    "JanusVQVAE",
    "JanusVisionModel",
    "JanusVQVAEConfig",
    "JanusVisionConfig",
    "JanusConfig",
]
| JanusImageProcessor |
python | ray-project__ray | python/ray/data/_internal/util.py | {
"start": 44836,
"end": 55917
class ____(pyarrow.fs.FileSystemHandler):
    """Wrapper for filesystem objects that adds retry functionality for file operations.

    This class wraps any filesystem object and adds automatic retries for common
    file operations that may fail transiently.
    """

    def __init__(
        self,
        fs: "pyarrow.fs.FileSystem",
        retryable_errors: List[str] = tuple(),
        max_attempts: int = 10,
        max_backoff_s: int = 32,
    ):
        """Initialize the retrying filesystem wrapper.

        Args:
            fs: The underlying filesystem to wrap
            retryable_errors: Error-message substrings that identify an error
                as transient and therefore retryable
            max_attempts: Maximum number of retry attempts
            max_backoff_s: Maximum backoff time in seconds
        """
        # Refuse double-wrapping: nesting retry layers would compound retry
        # counts and backoff delays.
        assert not isinstance(
            fs, RetryingPyFileSystem
        ), "Cannot wrap a RetryingPyFileSystem"
        self._fs = fs
        self._retryable_errors = retryable_errors
        self._max_attempts = max_attempts
        self._max_backoff_s = max_backoff_s

    def _retry_operation(self, operation: Callable, description: str):
        """Execute an operation with retries."""
        return call_with_retry(
            operation,
            description=description,
            match=self._retryable_errors,
            max_attempts=self._max_attempts,
            max_backoff_s=self._max_backoff_s,
        )

    def unwrap(self):
        # Return the wrapped (non-retrying) filesystem.
        return self._fs

    def copy_file(self, src: str, dest: str):
        """Copy a file."""
        return self._retry_operation(
            lambda: self._fs.copy_file(src, dest), f"copy file from {src} to {dest}"
        )

    def create_dir(self, path: str, recursive: bool):
        """Create a directory and subdirectories."""
        return self._retry_operation(
            lambda: self._fs.create_dir(path, recursive=recursive),
            f"create directory {path}",
        )

    def delete_dir(self, path: str):
        """Delete a directory and its contents, recursively."""
        return self._retry_operation(
            lambda: self._fs.delete_dir(path), f"delete directory {path}"
        )

    def delete_dir_contents(self, path: str, missing_dir_ok: bool = False):
        """Delete a directory's contents, recursively."""
        return self._retry_operation(
            lambda: self._fs.delete_dir_contents(path, missing_dir_ok=missing_dir_ok),
            f"delete directory contents {path}",
        )

    def delete_file(self, path: str):
        """Delete a file."""
        return self._retry_operation(
            lambda: self._fs.delete_file(path), f"delete file {path}"
        )

    def delete_root_dir_contents(self):
        """Delete the contents of the filesystem root, recursively."""
        return self._retry_operation(
            lambda: self._fs.delete_dir_contents("/", accept_root_dir=True),
            "delete root dir contents",
        )

    def equals(self, other: "pyarrow.fs.FileSystem") -> bool:
        """Test if this filesystem equals another."""
        # Pure comparison; no retry needed.
        return self._fs.equals(other)

    def get_file_info(self, paths: List[str]):
        """Get info for the given files."""
        return self._retry_operation(
            lambda: self._fs.get_file_info(paths),
            f"get file info for {paths}",
        )

    def get_file_info_selector(self, selector):
        """Get info for the files matched by a FileSelector."""
        return self._retry_operation(
            lambda: self._fs.get_file_info(selector),
            f"get file info for {selector}",
        )

    def get_type_name(self):
        # Identifies this handler implementation to pyarrow.
        return "RetryingPyFileSystem"

    def move(self, src: str, dest: str):
        """Move / rename a file or directory."""
        return self._retry_operation(
            lambda: self._fs.move(src, dest), f"move from {src} to {dest}"
        )

    def normalize_path(self, path: str) -> str:
        """Normalize filesystem path."""
        return self._retry_operation(
            lambda: self._fs.normalize_path(path), f"normalize path {path}"
        )

    def open_append_stream(
        self,
        path: str,
        metadata=None,
    ) -> "pyarrow.NativeFile":
        """Open an output stream for appending.

        Compression is disabled in this method because it is handled in the
        PyFileSystem abstract class.
        """
        return self._retry_operation(
            lambda: self._fs.open_append_stream(
                path,
                compression=None,
                metadata=metadata,
            ),
            f"open append stream for {path}",
        )

    def open_input_stream(
        self,
        path: str,
    ) -> "pyarrow.NativeFile":
        """Open an input stream for sequential reading.

        Compression is disabled in this method because it is handled in the
        PyFileSystem abstract class.
        """
        return self._retry_operation(
            lambda: self._fs.open_input_stream(path, compression=None),
            f"open input stream for {path}",
        )

    def open_output_stream(
        self,
        path: str,
        metadata=None,
    ) -> "pyarrow.NativeFile":
        """Open an output stream for sequential writing.

        Compression is disabled in this method because it is handled in the
        PyFileSystem abstract class.
        """
        return self._retry_operation(
            lambda: self._fs.open_output_stream(
                path,
                compression=None,
                metadata=metadata,
            ),
            f"open output stream for {path}",
        )

    def open_input_file(self, path: str) -> "pyarrow.NativeFile":
        """Open an input file for random access reading."""
        return self._retry_operation(
            lambda: self._fs.open_input_file(path), f"open input file {path}"
        )
def iterate_with_retry(
    iterable_factory: Callable[[], Iterable],
    description: str,
    *,
    match: Optional[List[str]] = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> Any:
    """Iterate through an iterable with retries.

    If the iterable raises an exception, this function recreates and re-iterates
    through the iterable, while skipping the items that have already been yielded.

    Args:
        iterable_factory: A no-argument function that creates the iterable.
        description: An imperative description of the function being retried. For
            example, "open the file".
        match: A list of strings to match in the exception message. If ``None``, any
            error is retried.
        max_attempts: The maximum number of attempts to retry.
        max_backoff_s: The maximum number of seconds to backoff.

    Yields:
        The items of the iterable, each exactly once and in order, even when
        retries restart the underlying iterable.
    """
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    num_items_yielded = 0
    for attempt in range(max_attempts):
        try:
            iterable = iterable_factory()
            for item_index, item in enumerate(iterable):
                if item_index < num_items_yielded:
                    # Skip items that have already been yielded on a previous attempt.
                    continue

                num_items_yielded += 1
                yield item
            return
        except Exception as e:
            is_retryable = match is None or any(pattern in str(e) for pattern in match)
            if is_retryable and attempt + 1 < max_attempts:
                # Retry with binary exponential backoff with random jitter.
                backoff = min((2 ** (attempt + 1)), max_backoff_s) * random.random()
                logger.debug(
                    f"Retrying {attempt + 1} attempts to {description} "
                    f"after {backoff} seconds."
                )
                time.sleep(backoff)
            else:
                raise e from None
def convert_bytes_to_human_readable_str(num_bytes: int) -> str:
    """Format a byte count as a short human-readable string (GB, MB, or KB)."""
    # Decimal (SI) thresholds, largest first; anything below 1 MB falls
    # through to KB formatting.
    for threshold, unit in ((1e9, "GB"), (1e6, "MB")):
        if num_bytes >= threshold:
            return f"{round(num_bytes / threshold)}{unit}"
    return f"{round(num_bytes / 1e3)}KB"
def _validate_rows_per_file_args(
*,
num_rows_per_file: Optional[int] = None,
min_rows_per_file: Optional[int] = None,
max_rows_per_file: Optional[int] = None,
) -> Tuple[Optional[int], Optional[int]]:
"""Helper method to validate and handle rows per file arguments.
Args:
num_rows_per_file: Deprecated parameter for number of rows per file
min_rows_per_file: New parameter for minimum rows per file
max_rows_per_file: New parameter for maximum rows per file
Returns:
A tuple of (effective_min_rows_per_file, effective_max_rows_per_file)
"""
if num_rows_per_file is not None:
import warnings
warnings.warn(
"`num_rows_per_file` is deprecated and will be removed in a future release. "
"Use `min_rows_per_file` instead.",
DeprecationWarning,
stacklevel=3,
)
if min_rows_per_file is not None:
raise ValueError(
"Cannot specify both `num_rows_per_file` and `min_rows_per_file`. "
"Use `min_rows_per_file` as `num_rows_per_file` is deprecated."
)
min_rows_per_file = num_rows_per_file
# Validate max_rows_per_file
if max_rows_per_file is not None and max_rows_per_file <= 0:
raise ValueError("max_rows_per_file must be a positive integer")
# Validate min_rows_per_file
if min_rows_per_file is not None and min_rows_per_file <= 0:
raise ValueError("min_rows_per_file must be a positive integer")
# Validate that max >= min if both are specified
if (
min_rows_per_file is not None
and max_rows_per_file is not None
and min_rows_per_file > max_rows_per_file
):
raise ValueError(
f"min_rows_per_file ({min_rows_per_file}) cannot be greater than "
f"max_rows_per_file ({max_rows_per_file})"
)
return min_rows_per_file, max_rows_per_file
def is_nan(value) -> bool:
    """Return True if the provided value is ``np.nan``."""
    # Only floats can be NaN; everything else (including None and strings)
    # is rejected up front.
    if not isinstance(value, float):
        return False
    try:
        return np.isnan(value)
    except TypeError:
        return False
def is_null(value: Any) -> bool:
    """Generalization of ``is_nan`` that treats both ``None`` and ``np.nan``
    as null values."""
    if value is None:
        return True
    return is_nan(value)
def keys_equal(keys1, keys2):
    """Return True if the two key sequences are elementwise equal, treating
    a pair of NaNs as equal."""
    if len(keys1) != len(keys2):
        return False
    # NaN never compares equal to itself, so matched NaN pairs are accepted
    # explicitly before falling back to ordinary equality.
    return all((is_nan(a) and is_nan(b)) or a == b for a, b in zip(keys1, keys2))
def get_total_obj_store_mem_on_node() -> int:
    """Return the total object store memory on the current node.

    This function incurs an RPC. Use it cautiously.
    """
    node_id = ray.get_runtime_context().get_node_id()
    # NOTE(review): relies on Ray's private state API; the mapping is keyed by
    # node id and is expected to expose an "object_store_memory" resource.
    total_resources_per_node = ray._private.state.total_resources_per_node()
    assert (
        node_id in total_resources_per_node
    ), f"Expected node '{node_id}' to be in resources: {total_resources_per_node}"
    return total_resources_per_node[node_id]["object_store_memory"]
| RetryingPyFileSystemHandler |
python | apache__airflow | airflow-core/tests/unit/always/test_project_structure.py | {
"start": 18789,
"end": 21067
class ____(ProjectStructureTest):
    """Checks that every operator is covered by example"""

    # Those operators are deprecated, so we do not need examples for them
    DEPRECATED_CLASSES: set = set()

    # Those operators should not have examples as they are never used standalone (they are abstract)
    BASE_CLASSES: set = set()

    # Please add the examples to those operators at the earliest convenience :)
    MISSING_EXAMPLES_FOR_CLASSES: set = set()

    def example_paths(self):
        """Override this method if your example dags are located elsewhere"""
        # Both system-test examples and in-package example DAGs are scanned.
        yield from glob.glob(
            f"{AIRFLOW_ROOT_PATH}/providers/{self.PROVIDER}/tests/system/{self.PROVIDER}/**/example_*.py",
            recursive=True,
        )
        yield from glob.glob(
            f"{AIRFLOW_ROOT_PATH}/providers/{self.PROVIDER}/src/airflow/providers/{self.PROVIDER}/**/example_*.py",
            recursive=True,
        )

    def test_missing_examples(self):
        """
        Assert that all operators defined under operators, sensors and transfers directories
        are used in any of the example dags
        """
        classes = self.list_of_classes()
        assert len(classes) != 0, "Failed to retrieve operators, override class_paths if needed"
        classes = set(classes.keys())
        # Remove every class that is imported by at least one example DAG.
        for example in self.example_paths():
            classes -= get_imports_from_file(example)

        covered_but_omitted = self.MISSING_EXAMPLES_FOR_CLASSES - classes
        classes -= self.MISSING_EXAMPLES_FOR_CLASSES
        classes -= self.DEPRECATED_CLASSES
        classes -= self.BASE_CLASSES
        # Test helper classes are not operators and need no examples.
        classes = set(class_name for class_name in classes if not class_name.startswith("Test"))
        if set() != classes:
            print("Classes with missing examples:")
            print_sorted(classes)
            pytest.fail(
                "Not all classes are covered with example dags. Update self.MISSING_EXAMPLES_FOR_CLASSES "
                "if you want to skip this error"
            )
        if set() != covered_but_omitted:
            print("Covered classes that are listed as missing:")
            print_sorted(covered_but_omitted)
            pytest.fail("Operator listed in missing examples but is used in example dag")
| ExampleCoverageTest |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 32149,
"end": 40981
class ____(nn.Module):
    """Hierarchical (Swin-style) audio encoder operating on log-mel spectrograms."""

    def __init__(self, config):
        super().__init__()
        self.num_layers = len(config.depths)

        self.config = config
        self.patch_embed = ClapAudioPatchEmbed(config)
        self.enable_fusion = config.enable_fusion
        self.patch_stride = self.patch_embed.patch_stride
        self.spec_size = config.spec_size
        # Ratio between the (square) spectrogram input size and the number of mel bins.
        self.freq_ratio = config.spec_size // config.num_mel_bins

        # Hidden size doubles at each stage; this is the final stage's width.
        self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1))

        # Per-block stochastic-depth rates, linearly spaced over all blocks.
        drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]

        grid_size = self.patch_embed.grid_size
        # Spatial resolution halves at each stage.
        self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)]

        self.layers = nn.ModuleList(
            [
                ClapAudioStage(
                    config=config,
                    dim=int(config.patch_embeds_hidden_size * 2**i_layer),
                    input_resolution=self.input_resolutions[i_layer],
                    depth=config.depths[i_layer],
                    num_heads=config.num_attention_heads[i_layer],
                    drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    # No downsampling after the last stage.
                    downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )

        self.gradient_checkpointing = False

        self.batch_norm = nn.BatchNorm2d(config.num_mel_bins)
        self.norm = nn.LayerNorm(self.num_features)
        self.depths = config.depths
        self.avgpool = nn.AdaptiveAvgPool1d(1)

    def reshape_mel2img(self, normalized_input_features):
        """
        The input is 4 normalized log mel spectrograms. It is reshape to the common shape of images. Each channel
        should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`].
        """
        _, _, time_length, freq_length = normalized_input_features.shape

        spec_width = int(self.spec_size * self.freq_ratio)
        spec_height = self.spec_size // self.freq_ratio
        if time_length > spec_width or freq_length > spec_height:
            raise ValueError("the wav size should be less than or equal to the swin input size")

        # to avoid bicubic zero error
        if time_length < spec_width:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True
            )
        if freq_length < spec_height:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (time_length, spec_height), mode="bicubic", align_corners=True
            )

        batch, channels, time, freq = normalized_input_features.shape

        # batch_size, channels, spec_width, spec_height --> batch_size, channels, spec_height * freq_ratio, spec_width // freq_ratio
        normalized_input_features = normalized_input_features.reshape(
            batch, channels * self.freq_ratio, time // self.freq_ratio, freq
        )
        normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous()
        normalized_input_features = normalized_input_features.reshape(
            batch, channels, freq * self.freq_ratio, time // self.freq_ratio
        )

        return normalized_input_features

    def forward(
        self,
        input_features,
        is_longer: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[tuple, ClapAudioModelOutput]:
        # BatchNorm2d was built over num_mel_bins, so the mel axis is moved to
        # the channel position for normalization and then moved back.
        input_features = input_features.transpose(1, 3)
        normalized_input_features = self.batch_norm(input_features)
        normalized_input_features = normalized_input_features.transpose(1, 3)

        is_longer_list_idx = None
        if self.enable_fusion:
            # Indices of the batch entries flagged as "longer" inputs.
            is_longer_list = is_longer.to(input_features.device)
            is_longer_list_idx = torch.where(is_longer_list == 1)[0]

        hidden_states = self.reshape_mel2img(normalized_input_features)

        frames_num = hidden_states.shape[2]

        hidden_states = self.patch_embed(hidden_states, is_longer_list_idx)

        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        input_dimensions = self.input_resolutions[0]

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange batch_size (height width) channels -> batch_size channel height width
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            input_dimensions = self.input_resolutions[i]
            layer_outputs = layer_module(hidden_states, input_dimensions, output_attentions, always_partition)

            hidden_states = layer_outputs[0]

            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                # here we use the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[3:]

        last_hidden_state = self.norm(hidden_states)

        batch_size, _, n_channels = last_hidden_state.shape

        freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]

        last_hidden_state = (
            last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape)
        )

        batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape
        # group 2D CNN
        c_freq_bin = n_frequencies // self.freq_ratio
        last_hidden_state = last_hidden_state.reshape(
            batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp
        )
        last_hidden_state = (
            last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1)
        )
        # Global average pool over the flattened spatial axis -> pooled embedding.
        latent_output = self.avgpool(torch.flatten(last_hidden_state, 2))
        latent_output = torch.flatten(latent_output, 1)

        if not return_dict:
            return tuple(
                v
                for v in [
                    last_hidden_state,
                    latent_output,
                    all_reshaped_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=latent_output,
            hidden_states=all_reshaped_hidden_states,
            attentions=all_self_attentions,
        )
| ClapAudioEncoder |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_gcs_to_sftp.py | {
"start": 1372,
"end": 17259
} | class ____:
@pytest.mark.parametrize(
    ("source_object", "target_object", "keep_directory_structure"),
    [
        ("folder/test_object.txt", "folder/test_object.txt", True),
        ("folder/subfolder/test_object.txt", "folder/subfolder/test_object.txt", True),
        ("folder/test_object.txt", "test_object.txt", False),
        ("folder/subfolder/test_object.txt", "test_object.txt", False),
    ],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_execute_copy_single_file(
    self, sftp_hook_mock, gcs_hook_mock, source_object, target_object, keep_directory_structure
):
    """Copying a single object downloads from GCS, stores over SFTP, and keeps the source."""
    # mock.patch decorators apply bottom-up: sftp_hook_mock is the SFTPHook patch,
    # gcs_hook_mock the GCSHook patch.
    task = GCSToSFTPOperator(
        task_id=TASK_ID,
        source_bucket=TEST_BUCKET,
        source_object=source_object,
        destination_path=DESTINATION_SFTP,
        keep_directory_structure=keep_directory_structure,
        move_object=False,
        gcp_conn_id=GCP_CONN_ID,
        sftp_conn_id=SFTP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    task.execute({})
    gcs_hook_mock.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    sftp_hook_mock.assert_called_once_with(SFTP_CONN_ID)

    gcs_hook_mock.return_value.download.assert_called_with(
        bucket_name=TEST_BUCKET, object_name=source_object, filename=mock.ANY
    )
    sftp_hook_mock.return_value.store_file.assert_called_with(
        os.path.join(DESTINATION_SFTP, target_object), mock.ANY
    )
    # move_object=False must not delete the source object.
    gcs_hook_mock.return_value.delete.assert_not_called()
@pytest.mark.parametrize(
    ("source_object", "target_object", "keep_directory_structure"),
    [
        ("folder/test_object.txt", "folder/test_object.txt", True),
        ("folder/subfolder/test_object.txt", "folder/subfolder/test_object.txt", True),
        ("folder/test_object.txt", "test_object.txt", False),
        ("folder/subfolder/test_object.txt", "test_object.txt", False),
    ],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_execute_move_single_file(
    self, sftp_hook_mock, gcs_hook_mock, source_object, target_object, keep_directory_structure
):
    """Moving a single object behaves like copy but also deletes the GCS source."""
    task = GCSToSFTPOperator(
        task_id=TASK_ID,
        source_bucket=TEST_BUCKET,
        source_object=source_object,
        destination_path=DESTINATION_SFTP,
        keep_directory_structure=keep_directory_structure,
        move_object=True,
        gcp_conn_id=GCP_CONN_ID,
        sftp_conn_id=SFTP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    task.execute(None)
    gcs_hook_mock.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    sftp_hook_mock.assert_called_once_with(SFTP_CONN_ID)

    gcs_hook_mock.return_value.download.assert_called_with(
        bucket_name=TEST_BUCKET, object_name=source_object, filename=mock.ANY
    )
    sftp_hook_mock.return_value.store_file.assert_called_with(
        os.path.join(DESTINATION_SFTP, target_object), mock.ANY
    )
    # move_object=True deletes the source after a successful transfer.
    gcs_hook_mock.return_value.delete.assert_called_once_with(TEST_BUCKET, source_object)
@pytest.mark.parametrize(
    (
        "source_object",
        "prefix",
        "delimiter",
        "gcs_files_list",
        "target_objects",
        "keep_directory_structure",
    ),
    [
        (
            "folder/test_object*.txt",
            "folder/test_object",
            ".txt",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["test_object/file1.txt", "test_object/file2.txt"],
            False,
        ),
        (
            "folder/test_object/*",
            "folder/test_object/",
            "",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["file1.txt", "file2.txt"],
            False,
        ),
        (
            "folder/test_object*.txt",
            "folder/test_object",
            ".txt",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["folder/test_object/file1.txt", "folder/test_object/file2.txt"],
            True,
        ),
        (
            "folder/test_object/*",
            "folder/test_object/",
            "",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["folder/test_object/file1.txt", "folder/test_object/file2.txt"],
            True,
        ),
    ],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_execute_copy_with_wildcard(
    self,
    sftp_hook_mock,
    gcs_hook_mock,
    source_object,
    prefix,
    delimiter,
    gcs_files_list,
    target_objects,
    keep_directory_structure,
):
    """A wildcard source lists matching GCS objects and copies each without deleting."""
    # The wildcard is split into prefix/delimiter for the GCS list call.
    gcs_hook_mock.return_value.list.return_value = gcs_files_list
    operator = GCSToSFTPOperator(
        task_id=TASK_ID,
        source_bucket=TEST_BUCKET,
        source_object=source_object,
        destination_path=DESTINATION_SFTP,
        keep_directory_structure=keep_directory_structure,
        move_object=False,
        gcp_conn_id=GCP_CONN_ID,
        sftp_conn_id=SFTP_CONN_ID,
    )
    operator.execute(None)

    gcs_hook_mock.return_value.list.assert_called_with(TEST_BUCKET, delimiter=delimiter, prefix=prefix)

    gcs_hook_mock.return_value.download.assert_has_calls(
        [
            mock.call(bucket_name=TEST_BUCKET, object_name=gcs_file, filename=mock.ANY)
            for gcs_file in gcs_files_list
        ]
    )
    sftp_hook_mock.return_value.store_file.assert_has_calls(
        [
            mock.call(os.path.join(DESTINATION_SFTP, target_object), mock.ANY)
            for target_object in target_objects
        ]
    )

    gcs_hook_mock.return_value.delete.assert_not_called()
@pytest.mark.parametrize(
    (
        "source_object",
        "prefix",
        "delimiter",
        "gcs_files_list",
        "target_objects",
        "keep_directory_structure",
    ),
    [
        (
            "folder/test_object*.txt",
            "folder/test_object",
            ".txt",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["test_object/file1.txt", "test_object/file2.txt"],
            False,
        ),
        (
            "folder/test_object/*",
            "folder/test_object/",
            "",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["file1.txt", "file2.txt"],
            False,
        ),
        (
            "folder/test_object*.txt",
            "folder/test_object",
            ".txt",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["folder/test_object/file1.txt", "folder/test_object/file2.txt"],
            True,
        ),
        (
            "folder/test_object/*",
            "folder/test_object/",
            "",
            [
                "folder/test_object/file1.txt",
                "folder/test_object/file2.txt",
            ],
            ["folder/test_object/file1.txt", "folder/test_object/file2.txt"],
            True,
        ),
    ],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_execute_move_with_wildcard(
    self,
    sftp_hook_mock,
    gcs_hook_mock,
    source_object,
    prefix,
    delimiter,
    gcs_files_list,
    target_objects,
    keep_directory_structure,
):
    """A wildcard move transfers every matched object and deletes each GCS source."""
    gcs_hook_mock.return_value.list.return_value = gcs_files_list
    operator = GCSToSFTPOperator(
        task_id=TASK_ID,
        source_bucket=TEST_BUCKET,
        source_object=source_object,
        destination_path=DESTINATION_SFTP,
        keep_directory_structure=keep_directory_structure,
        move_object=True,
        gcp_conn_id=GCP_CONN_ID,
        sftp_conn_id=SFTP_CONN_ID,
    )
    operator.execute(None)

    gcs_hook_mock.return_value.list.assert_called_with(TEST_BUCKET, delimiter=delimiter, prefix=prefix)

    gcs_hook_mock.return_value.download.assert_has_calls(
        [
            mock.call(bucket_name=TEST_BUCKET, object_name=gcs_file, filename=mock.ANY)
            for gcs_file in gcs_files_list
        ]
    )
    sftp_hook_mock.return_value.store_file.assert_has_calls(
        [
            mock.call(os.path.join(DESTINATION_SFTP, target_object), mock.ANY)
            for target_object in target_objects
        ]
    )

    # Every matched source object must be deleted after the move.
    gcs_hook_mock.return_value.delete.assert_has_calls(
        [mock.call(TEST_BUCKET, gcs_file) for gcs_file in gcs_files_list]
    )
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_execute_more_than_one_wildcard_exception(self, sftp_hook_mock, gcs_hook_mock):
operator = GCSToSFTPOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object="csv/*/test_*.csv",
destination_path=DESTINATION_SFTP,
move_object=False,
gcp_conn_id=GCP_CONN_ID,
sftp_conn_id=SFTP_CONN_ID,
)
with pytest.raises(AirflowException):
operator.execute(None)
@pytest.mark.parametrize(
(
"source_object",
"destination_path",
"keep_directory_structure",
"expected_source",
"expected_destination",
),
[
(
"folder/test_object.txt",
"dest/dir",
True,
"folder/test_object.txt",
"dest/dir/folder/test_object.txt",
),
(
"folder/test_object.txt",
"dest/dir/",
True,
"folder/test_object.txt",
"dest/dir/folder/test_object.txt",
),
(
"folder/test_object.txt",
"dest/dir",
False,
"folder/test_object.txt",
"dest/dir/test_object.txt",
),
("folder/test_object.txt", "/", False, "folder/test_object.txt", "/test_object.txt"),
("folder/test_object.txt", "/", True, "folder/test_object.txt", "/folder/test_object.txt"),
(
"folder/test_object.txt",
"dest/dir/dest_object.txt",
True,
"folder/test_object.txt",
"dest/dir/dest_object.txt/folder/test_object.txt", # Dest path is always treated as "dir"
),
(
"folder/test_object.txt",
"dest/dir/dest_object.txt",
False,
"folder/test_object.txt",
"dest/dir/dest_object.txt/test_object.txt", # Dest path is always treated as "dir"
),
("folder/test_object*.txt", "dest/dir", True, "folder", "dest/dir/folder"),
("folder/test_object*", "dest/dir", False, "folder", "dest/dir"),
("*", "/", True, "/", "/"),
("*", "/dest/dir", True, "/", "/dest/dir"),
("*", "/dest/dir", False, "/", "/dest/dir"),
],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_get_openlineage_facets(
self,
sftp_hook_mock,
source_object,
destination_path,
keep_directory_structure,
expected_source,
expected_destination,
):
sftp_hook_mock.return_value.remote_host = "11.222.33.44"
sftp_hook_mock.return_value.port = 22
operator = GCSToSFTPOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=source_object,
destination_path=destination_path,
keep_directory_structure=keep_directory_structure,
move_object=False,
gcp_conn_id=GCP_CONN_ID,
sftp_conn_id=SFTP_CONN_ID,
)
result = operator.get_openlineage_facets_on_start()
assert not result.run_facets
assert not result.job_facets
assert len(result.inputs) == 1
assert len(result.outputs) == 1
assert result.inputs[0].namespace == f"gs://{TEST_BUCKET}"
assert result.inputs[0].name == expected_source
assert result.outputs[0].namespace == "file://11.222.33.44:22"
assert result.outputs[0].name == expected_destination
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_create_intermediate_dirs_true(self, sftp_hook_mock, gcp_hook_mock):
task = GCSToSFTPOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object="folder/test_object.txt", # Hard-coding
destination_path=DESTINATION_SFTP,
keep_directory_structure=True, # Hard-coding
create_intermediate_dirs=True,
move_object=False,
gcp_conn_id=GCP_CONN_ID,
sftp_conn_id=SFTP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
assert task.create_intermediate_dirs
task.execute(None)
sftp_hook_mock.return_value.create_directory.assert_called_once_with(
os.path.join(DESTINATION_SFTP, "folder")
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_sftp.SFTPHook")
def test_create_intermediate_dirs_false(self, sftp_hook_mock, gcp_hook_mock):
task = GCSToSFTPOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object="folder/test_object.txt", # Hard-coding
destination_path=DESTINATION_SFTP,
keep_directory_structure=True, # Hard-coding
create_intermediate_dirs=False,
move_object=False,
gcp_conn_id=GCP_CONN_ID,
sftp_conn_id=SFTP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
assert not task.create_intermediate_dirs
task.execute(None)
sftp_hook_mock.return_value.create_directory.assert_not_called()
| TestGoogleCloudStorageToSFTPOperator |
python | protocolbuffers__protobuf | python/google/protobuf/internal/containers.py | {
"start": 20667,
"end": 21631
} | class ____: # pylint: disable=missing-class-docstring
def __init__(self, parent, index):
self._parent = parent
self._index = index
def _check_valid(self):
if not self._parent:
raise ValueError('UnknownField does not exist. '
'The parent message might be cleared.')
if self._index >= len(self._parent):
raise ValueError('UnknownField does not exist. '
'The parent message might be cleared.')
@property
def field_number(self):
self._check_valid()
# pylint: disable=protected-access
return self._parent._internal_get(self._index)._field_number
@property
def wire_type(self):
self._check_valid()
# pylint: disable=protected-access
return self._parent._internal_get(self._index)._wire_type
@property
def data(self):
self._check_valid()
# pylint: disable=protected-access
return self._parent._internal_get(self._index)._data
| UnknownFieldRef |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_data_test.py | {
"start": 1807,
"end": 4051
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._dummy_datum = dummy_datum = debug_data.DebugTensorDatum(
"/foo", "bar_0_DebugIdentity_42"
)
def testNaN(self):
a = np.array([np.nan, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInf(self):
a = np.array([np.inf, np.inf, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNanAndInf(self):
a = np.array([np.inf, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNoNanOrInf(self):
a = np.array([0.0, 0.0, 7.0])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testEmpty(self):
a = np.array([])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInconvertibleTensorProto(self):
self.assertFalse(
debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(
tensor_pb2.TensorProto(), initialized=False
),
)
)
self.assertFalse(
debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(
tensor_pb2.TensorProto(), initialized=True
),
)
)
def testDTypeComplexWorks(self):
a = np.array([1j, 3j, 3j, 7j], dtype=np.complex128)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
b = np.array([1j, 3j, 3j, 7j, np.nan], dtype=np.complex128)
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, b))
def testDTypeIntegerWorks(self):
a = np.array([1, 3, 3, 7], dtype=np.int16)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeStringGivesFalse(self):
"""isnan and isinf are not applicable to strings."""
a = np.array(["s", "p", "a", "m"])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeObjectGivesFalse(self):
dt = np.dtype([("spam", np.str_, 16), ("eggs", np.float64, (2,))])
a = np.array([("spam", (8.0, 7.0)), ("eggs", (6.0, 5.0))], dtype=dt)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
| HasNanOrInfTest |
python | doocs__leetcode | solution/2000-2099/2087.Minimum Cost Homecoming of a Robot in a Grid/Solution.py | {
"start": 0,
"end": 494
} | class ____:
def minCost(
self,
startPos: List[int],
homePos: List[int],
rowCosts: List[int],
colCosts: List[int],
) -> int:
i, j = startPos
x, y = homePos
ans = 0
if i < x:
ans += sum(rowCosts[i + 1 : x + 1])
else:
ans += sum(rowCosts[x:i])
if j < y:
ans += sum(colCosts[j + 1 : y + 1])
else:
ans += sum(colCosts[y:j])
return ans
| Solution |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 5508,
"end": 5868
} | class ____(GISLookup):
"""
The "~=" operator is the "same as" operator. It tests actual geometric
equality of two features. So if A and B are the same feature,
vertex-by-vertex, the operator returns true.
"""
lookup_name = "same_as"
BaseSpatialField.register_lookup(SameAsLookup, "exact")
@BaseSpatialField.register_lookup
| SameAsLookup |
python | pytorch__pytorch | test/inductor/test_inductor_freezing.py | {
"start": 2911,
"end": 3766
} | class ____(torch.nn.Module):
def __init__(
self,
in_channels,
out_channels,
bias=False,
kernel_size=3,
stride=2,
running_mean=None,
running_var=None,
weight=None,
bn_bias=None,
):
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels, out_channels, bias=bias, kernel_size=kernel_size, stride=stride
)
self.running_mean = running_mean
self.running_var = running_var
self.weight = weight
self.bias = bn_bias
def forward(self, x):
return torch.nn.functional.batch_norm(
self.conv(x),
self.running_mean,
self.running_var,
self.weight,
self.bias,
False,
0.1,
1e-5,
)
| ConvFunctionalBN |
python | pypa__warehouse | tests/unit/test_views.py | {
"start": 14518,
"end": 17028
} | class ____:
@pytest.mark.parametrize(
("referer", "redirect", "get", "valid"),
[
(None, "/fake-route", MultiDict({"locale_id": "en"}), True),
(
"/robots.txt",
"/robots.txt",
MultiDict({"locale_id": "non-existent-locale"}),
False,
),
],
)
def test_locale(self, referer, redirect, get, valid, monkeypatch):
localizer = pretend.stub(translate=lambda *a: "translated")
make_localizer = pretend.call_recorder(lambda *a: localizer)
monkeypatch.setattr(views, "make_localizer", make_localizer)
tdirs = pretend.stub()
request = pretend.stub(
GET=get,
referer=referer,
route_path=pretend.call_recorder(lambda r: "/fake-route"),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
host=None,
registry=pretend.stub(queryUtility=lambda *a: tdirs),
)
result = locale(request)
assert isinstance(result, HTTPSeeOther)
assert result.location == redirect
if valid:
assert "Set-Cookie" in result.headers
assert f"_LOCALE_={get['locale_id']};" in result.headers["Set-Cookie"]
assert make_localizer.calls == [pretend.call(get["locale_id"], tdirs)]
assert request.session.flash.calls == [
pretend.call("translated", queue="success")
]
else:
assert "Set-Cookie" not in result.headers
@pytest.mark.parametrize(
"get",
[
MultiDict({"nonsense": "arguments"}),
MultiDict([("locale_id", "one"), ("locale_id", "two")]),
],
)
def test_locale_bad_request(self, get, monkeypatch):
request = pretend.stub(
GET=get,
route_path=pretend.call_recorder(lambda r: "/fake-route"),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
host=None,
)
with pytest.raises(HTTPBadRequest):
locale(request)
def test_csi_current_user_indicator():
assert current_user_indicator(pretend.stub()) == {}
def test_csi_flash_messages():
assert flash_messages(pretend.stub()) == {}
def test_csi_session_notifications():
assert session_notifications(pretend.stub()) == {}
def test_csi_sidebar_sponsor_logo():
assert sidebar_sponsor_logo(pretend.stub()) == {}
| TestLocale |
python | scipy__scipy | scipy/sparse/linalg/tests/test_norm.py | {
"start": 1436,
"end": 3925
} | class ____:
def setup_method(self):
a = np.arange(9) - 4
b = a.reshape((3, 3))
self.b = scipy.sparse.csr_array(b)
def test_matrix_norm(self):
# Frobenius norm is the default
assert_allclose(spnorm(self.b), 7.745966692414834)
assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)
assert_allclose(spnorm(self.b, np.inf), 9)
assert_allclose(spnorm(self.b, -np.inf), 2)
assert_allclose(spnorm(self.b, 1), 7)
assert_allclose(spnorm(self.b, -1), 6)
assert_allclose(spnorm(self.b.astype(np.float64), 2), 7.348469228349534)
# _multi_svd_norm is not implemented for sparse array
assert_raises(NotImplementedError, spnorm, self.b, -2)
def test_matrix_norm_axis(self):
for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, np.inf, axis=axis), 9)
assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
assert_allclose(spnorm(m, 1, axis=axis), 7)
assert_allclose(spnorm(m, -1, axis=axis), 6)
def test_vector_norm(self):
v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
for m, a in (self.b, 0), (self.b.T, 1):
for axis in a, (a, ), a-2, (a-2, ):
assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
assert_allclose(spnorm(m, axis=axis), v)
assert_allclose(spnorm(m, ord=2, axis=axis), v)
assert_allclose(spnorm(m, ord=None, axis=axis), v)
def test_norm_exceptions(self):
m = self.b
assert_raises(TypeError, spnorm, m, None, 1.5)
assert_raises(TypeError, spnorm, m, None, [2])
assert_raises(ValueError, spnorm, m, None, ())
assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
assert_raises(ValueError, spnorm, m, None, (0, 0))
assert_raises(ValueError, spnorm, m, None, (0, 2))
assert_raises(ValueError, spnorm, m, None, (-3, 0))
assert_raises(ValueError, spnorm, m, None, 2)
assert_raises(ValueError, spnorm, m, None, -3)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))
| TestNorm |
python | apache__airflow | airflow-core/tests/unit/core/test_stats.py | {
"start": 10682,
"end": 12760
} | class ____:
@pytest.mark.parametrize(
("validator", "stat_name", "expect_incr"),
[
(PatternAllowListValidator, "stats_one", True),
(PatternAllowListValidator, "stats_two.bla", True),
(PatternAllowListValidator, "stats_three.foo", True),
(PatternAllowListValidator, "stats_foo_three", True),
(PatternAllowListValidator, "stats_three", False),
(PatternBlockListValidator, "stats_one", False),
(PatternBlockListValidator, "stats_two.bla", False),
(PatternBlockListValidator, "stats_three.foo", False),
(PatternBlockListValidator, "stats_foo_three", False),
(PatternBlockListValidator, "stats_foo", False),
(PatternBlockListValidator, "stats_three", True),
],
)
def test_allow_and_block_list(self, validator, stat_name, expect_incr):
statsd_client = Mock(spec=statsd.StatsClient)
stats = SafeStatsdLogger(statsd_client, validator("stats_one, stats_two, foo"))
stats.incr(stat_name)
if expect_incr:
statsd_client.incr.assert_called_once_with(stat_name, 1, 1)
else:
statsd_client.assert_not_called()
@pytest.mark.parametrize(
("match_pattern", "expect_incr"),
[
("^stat", True), # Match: Regex Startswith
("a.{4}o", True), # Match: RegEx Pattern
("foo", True), # Match: Any substring
("stat", True), # Match: Substring Startswith
("^banana", False), # No match
],
)
def test_regex_matches(self, match_pattern, expect_incr):
stat_name = "stats_foo_one"
validator = PatternAllowListValidator
statsd_client = Mock(spec=statsd.StatsClient)
stats = SafeStatsdLogger(statsd_client, validator(match_pattern))
stats.incr(stat_name)
if expect_incr:
statsd_client.incr.assert_called_once_with(stat_name, 1, 1)
else:
statsd_client.assert_not_called()
| TestStatsAllowAndBlockLists |
python | plotly__plotly.py | plotly/graph_objs/_histogram.py | {
"start": 215,
"end": 91815
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "histogram"
_valid_props = {
"alignmentgroup",
"autobinx",
"autobiny",
"bingroup",
"cliponaxis",
"constraintext",
"cumulative",
"customdata",
"customdatasrc",
"error_x",
"error_y",
"histfunc",
"histnorm",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextanchor",
"insidetextfont",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"marker",
"meta",
"metasrc",
"name",
"nbinsx",
"nbinsy",
"offsetgroup",
"opacity",
"orientation",
"outsidetextfont",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textangle",
"textfont",
"textposition",
"textsrc",
"texttemplate",
"texttemplatefallback",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"x",
"xaxis",
"xbins",
"xcalendar",
"xhoverformat",
"xsrc",
"y",
"yaxis",
"ybins",
"ycalendar",
"yhoverformat",
"ysrc",
"zorder",
}
@property
def alignmentgroup(self):
"""
Set several traces linked to the same position axis or matching
axes to the same alignmentgroup. This controls whether bars
compute their positional range dependently or independently.
The 'alignmentgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["alignmentgroup"]
@alignmentgroup.setter
def alignmentgroup(self, val):
self["alignmentgroup"] = val
@property
def autobinx(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobinx` is not needed. However, we accept
`autobinx: true` or `false` and will update `xbins` accordingly
before deleting `autobinx` from the trace.
The 'autobinx' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobinx"]
@autobinx.setter
def autobinx(self, val):
self["autobinx"] = val
@property
def autobiny(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobiny` is not needed. However, we accept
`autobiny: true` or `false` and will update `ybins` accordingly
before deleting `autobiny` from the trace.
The 'autobiny' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobiny"]
@autobiny.setter
def autobiny(self, val):
self["autobiny"] = val
@property
def bingroup(self):
"""
Set a group of histogram traces which will have compatible bin
settings. Note that traces on the same subplot and with the
same "orientation" under `barmode` "stack", "relative" and
"group" are forced into the same bingroup, Using `bingroup`,
traces under `barmode` "overlay" and on different axes (of the
same axis type) can have compatible bin settings. Note that
histogram and histogram2d* trace can share the same `bingroup`
The 'bingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["bingroup"]
@bingroup.setter
def bingroup(self, val):
self["bingroup"] = val
@property
def cliponaxis(self):
"""
Determines whether the text nodes are clipped about the subplot
axes. To show the text nodes above axis lines and tick labels,
make sure to set `xaxis.layer` and `yaxis.layer` to *below
traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cliponaxis"]
@cliponaxis.setter
def cliponaxis(self, val):
self["cliponaxis"] = val
@property
def constraintext(self):
"""
Constrain the size of text inside or outside a bar to be no
larger than the bar itself.
The 'constraintext' property is an enumeration that may be specified as:
- One of the following enumeration values:
['inside', 'outside', 'both', 'none']
Returns
-------
Any
"""
return self["constraintext"]
@constraintext.setter
def constraintext(self, val):
self["constraintext"] = val
@property
def cumulative(self):
"""
The 'cumulative' property is an instance of Cumulative
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.Cumulative`
- A dict of string/value properties that will be passed
to the Cumulative constructor
Returns
-------
plotly.graph_objs.histogram.Cumulative
"""
return self["cumulative"]
@cumulative.setter
def cumulative(self, val):
self["cumulative"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def error_x(self):
"""
The 'error_x' property is an instance of ErrorX
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.ErrorX`
- A dict of string/value properties that will be passed
to the ErrorX constructor
Returns
-------
plotly.graph_objs.histogram.ErrorX
"""
return self["error_x"]
@error_x.setter
def error_x(self, val):
self["error_x"] = val
@property
def error_y(self):
"""
The 'error_y' property is an instance of ErrorY
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.ErrorY`
- A dict of string/value properties that will be passed
to the ErrorY constructor
Returns
-------
plotly.graph_objs.histogram.ErrorY
"""
return self["error_y"]
@error_y.setter
def error_y(self, val):
self["error_y"] = val
@property
def histfunc(self):
"""
Specifies the binning function used for this histogram trace.
If "count", the histogram values are computed by counting the
number of values lying inside each bin. If "sum", "avg", "min",
"max", the histogram values are computed using the sum, the
average, the minimum or the maximum of the values lying inside
each bin respectively.
The 'histfunc' property is an enumeration that may be specified as:
- One of the following enumeration values:
['count', 'sum', 'avg', 'min', 'max']
Returns
-------
Any
"""
return self["histfunc"]
@histfunc.setter
def histfunc(self, val):
self["histfunc"] = val
@property
def histnorm(self):
"""
Specifies the type of normalization used for this histogram
trace. If "", the span of each bar corresponds to the number of
occurrences (i.e. the number of data points lying inside the
bins). If "percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences with
respect to the total number of sample points (here, the sum of
all bin HEIGHTS equals 100% / 1). If "density", the span of
each bar corresponds to the number of occurrences in a bin
divided by the size of the bin interval (here, the sum of all
bin AREAS equals the total number of sample points). If
*probability density*, the area of each bar corresponds to the
probability that an event will fall into the corresponding bin
(here, the sum of all bin AREAS equals 1).
The 'histnorm' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', 'percent', 'probability', 'density', 'probability
density']
Returns
-------
Any
"""
return self["histnorm"]
@histnorm.setter
def histnorm(self, val):
self["histnorm"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.histogram.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variable `binNumber`
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def insidetextanchor(self):
"""
Determines if texts are kept at center or start/end points in
`textposition` "inside" mode.
The 'insidetextanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['end', 'middle', 'start']
Returns
-------
Any
"""
return self["insidetextanchor"]
@insidetextanchor.setter
def insidetextanchor(self, val):
self["insidetextanchor"] = val
@property
def insidetextfont(self):
"""
Sets the font used for `text` lying inside the bar.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Returns
-------
plotly.graph_objs.histogram.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.histogram.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def marker(self):
    """Bar styling for this histogram.

    Accepts a :class:`plotly.graph_objs.histogram.Marker` instance or a
    dict of compatible properties; returns the Marker object.
    """
    return self["marker"]

@marker.setter
def marker(self, val):
    self["marker"] = val
@property
def meta(self):
    """Extra meta information attached to this trace (any type, or an
    array of values).

    Referenced from text attributes via ``%{meta[i]}`` within the trace,
    or ``%{data[n[.meta[i]}`` from layout attributes.
    """
    return self["meta"]

@meta.setter
def meta(self, val):
    self["meta"] = val
@property
def metasrc(self):
    """Chart Studio Cloud source reference for `meta` (string or
    plotly.grid_objs.Column)."""
    return self["metasrc"]

@metasrc.setter
def metasrc(self, val):
    self["metasrc"] = val
@property
def name(self):
    """Trace name, shown as the legend item and on hover (string; a
    number is converted to a string)."""
    return self["name"]

@name.setter
def name(self, val):
    self["name"] = val
@property
def nbinsx(self):
    """Maximum desired number of x bins (non-negative int).

    An algorithm picks the optimal bin size at or below this count.
    Ignored when `xbins.size` is provided.
    """
    return self["nbinsx"]

@nbinsx.setter
def nbinsx(self, val):
    self["nbinsx"] = val
@property
def nbinsy(self):
    """Maximum desired number of y bins (non-negative int).

    An algorithm picks the optimal bin size at or below this count.
    Ignored when `ybins.size` is provided.
    """
    return self["nbinsy"]

@nbinsy.setter
def nbinsy(self, val):
    self["nbinsy"] = val
@property
def offsetgroup(self):
    """Offset group name (string).

    Traces on the same position axis (or matching axes) sharing an
    offsetgroup line their bars up at the same position coordinate.
    """
    return self["offsetgroup"]

@offsetgroup.setter
def offsetgroup(self, val):
    self["offsetgroup"] = val
@property
def opacity(self):
    """Opacity of the trace (int or float in [0, 1])."""
    return self["opacity"]

@opacity.setter
def opacity(self, val):
    self["opacity"] = val
@property
def orientation(self):
    """Bar orientation: 'v' for bars spanning vertically, 'h' for
    horizontally."""
    return self["orientation"]

@orientation.setter
def orientation(self, val):
    self["orientation"] = val
@property
def outsidetextfont(self):
    """Font for `text` placed outside the bar.

    Accepts a :class:`plotly.graph_objs.histogram.Outsidetextfont`
    instance or a dict of compatible properties.
    """
    return self["outsidetextfont"]

@outsidetextfont.setter
def outsidetextfont(self, val):
    self["outsidetextfont"] = val
@property
def selected(self):
    """Styling applied to selected points.

    Accepts a :class:`plotly.graph_objs.histogram.Selected` instance or
    a dict of compatible properties.
    """
    return self["selected"]

@selected.setter
def selected(self, val):
    self["selected"] = val
@property
def selectedpoints(self):
    """Integer indices of selected points (any type accepted).

    An empty array is an empty selection (the `unselected` style applies
    everywhere); any non-array value means no selection at all, so the
    `selected`/`unselected` styles have no effect.
    """
    return self["selectedpoints"]

@selectedpoints.setter
def selectedpoints(self, val):
    self["selectedpoints"] = val
@property
def showlegend(self):
    """Whether this trace gets a legend item (bool)."""
    return self["showlegend"]

@showlegend.setter
def showlegend(self, val):
    self["showlegend"] = val
@property
def stream(self):
    """Streaming configuration.

    Accepts a :class:`plotly.graph_objs.histogram.Stream` instance or a
    dict of compatible properties.
    """
    return self["stream"]

@stream.setter
def stream(self, val):
    self["stream"] = val
@property
def text(self):
    """Hover text for each bar (string, number, or 1-D array thereof).

    A single string applies to every bar; an array is mapped in order to
    the trace's coordinates.
    """
    return self["text"]

@text.setter
def text(self, val):
    self["text"] = val
@property
def textangle(self):
    """Angle of tick labels relative to the bar, in degrees (-180..180;
    values outside are wrapped, e.g. 270 -> -90). -90 draws labels
    vertically; "auto" lets texts rotate to fit the bar.
    """
    return self["textangle"]

@textangle.setter
def textangle(self, val):
    self["textangle"] = val
@property
def textfont(self):
    """Text font.

    Accepts a :class:`plotly.graph_objs.histogram.Textfont` instance or
    a dict of compatible properties.
    """
    return self["textfont"]

@textfont.setter
def textfont(self, val):
    self["textfont"] = val
@property
def textposition(self):
    """Location of `text`: one of 'inside', 'outside', 'auto', 'none'.

    "inside" places text next to the bar end inside it (rotated/scaled
    if needed); "outside" places it just past the bar end unless another
    bar is stacked on top, in which case it is pushed inside; "auto"
    prefers inside but moves outside when the bar is too small and
    nothing is stacked on it; "none" hides the text.
    """
    return self["textposition"]

@textposition.setter
def textposition(self, val):
    self["textposition"] = val
@property
def textsrc(self):
    """Chart Studio Cloud source reference for `text` (string or
    plotly.grid_objs.Column)."""
    return self["textsrc"]

@textsrc.setter
def textsrc(self, val):
    self["textsrc"] = val
@property
def texttemplate(self):
    """Template string for point text; overrides `textinfo`.

    Variables are inserted with ``%{variable}`` (e.g. ``"y: %{y}"``).
    Numbers use d3-format syntax ``%{variable:d3-format}``
    (https://github.com/d3/d3-format/tree/v1.4.5#d3-format) and dates
    use d3-time-format syntax ``%{variable|d3-time-format}``
    (https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format).
    Unknown variables render as their specifier; undefined values use
    the fallback value. All `arrayOk: true` per-point attributes are
    available, plus the variables `label` and `value`.
    """
    return self["texttemplate"]

@texttemplate.setter
def texttemplate(self, val):
    self["texttemplate"] = val
@property
def texttemplatefallback(self):
    """Fallback shown when a template variable is missing (any type).

    Passing the boolean ``False`` displays the specifier containing the
    missing variable instead.
    """
    return self["texttemplatefallback"]

@texttemplatefallback.setter
def texttemplatefallback(self, val):
    self["texttemplatefallback"] = val
@property
def uid(self):
    """Trace id (string) providing object constancy across animations
    and transitions."""
    return self["uid"]

@uid.setter
def uid(self, val):
    self["uid"] = val
@property
def uirevision(self):
    """Persistence token for user-driven trace changes (any type).

    Controls persistence of e.g. `constraintrange` in `parcoords` and
    `editable: true` edits such as `name` and `colorbar.title`; defaults
    to `layout.uirevision`. Other user-driven changes are governed by
    layout attributes (`layout.legend.uirevision` for `trace.visible`,
    `layout.selectionrevision` for `selectedpoints`,
    `layout.editrevision` for `colorbar.(x|y)`). Changes are tracked by
    `uid`, falling back to trace index only when no `uid` is set — give
    each trace a `uid` if traces may be added/removed before the end of
    the `data` array.
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, val):
    self["uirevision"] = val
@property
def unselected(self):
    """Styling applied to unselected points.

    Accepts a :class:`plotly.graph_objs.histogram.Unselected` instance
    or a dict of compatible properties.
    """
    return self["unselected"]

@unselected.setter
def unselected(self, val):
    self["unselected"] = val
@property
def visible(self):
    """Trace visibility: True, False, or 'legendonly' (not drawn but may
    still appear as a legend item if the legend is visible)."""
    return self["visible"]

@visible.setter
def visible(self, val):
    self["visible"] = val
@property
def x(self):
    """Sample data binned on the x axis (tuple, list, numpy array, or
    pandas Series)."""
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
@property
def xaxis(self):
    """Target 2D cartesian x axis: 'x' (default, `layout.xaxis`), 'x2'
    (`layout.xaxis2`), and so on — the string 'x' optionally followed by
    an integer >= 1."""
    return self["xaxis"]

@xaxis.setter
def xaxis(self, val):
    self["xaxis"] = val
@property
def xbins(self):
    """x-axis binning options.

    Accepts a :class:`plotly.graph_objs.histogram.XBins` instance or a
    dict of compatible properties.
    """
    return self["xbins"]

@xbins.setter
def xbins(self, val):
    self["xbins"] = val
@property
def xcalendar(self):
    """Calendar system for `x` date data: one of 'chinese', 'coptic',
    'discworld', 'ethiopian', 'gregorian', 'hebrew', 'islamic',
    'jalali', 'julian', 'mayan', 'nanakshahi', 'nepali', 'persian',
    'taiwan', 'thai', 'ummalqura'."""
    return self["xcalendar"]

@xcalendar.setter
def xcalendar(self, val):
    self["xcalendar"] = val
@property
def xhoverformat(self):
    """Hover-text format rule for `x` (string; numbers converted).

    Uses d3 formatting mini-languages: numbers per
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format, dates per
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format,
    plus two extensions — "%h" (half-year) and "%{n}f" (fractional
    seconds with n digits; e.g. *2016-10-13 09:15:23.456* with
    "%H~%M~%S.%2f" displays *09~15~23.46*). Defaults to
    `xaxis.hoverformat`.
    """
    return self["xhoverformat"]

@xhoverformat.setter
def xhoverformat(self, val):
    self["xhoverformat"] = val
@property
def xsrc(self):
    """Chart Studio Cloud source reference for `x` (string or
    plotly.grid_objs.Column)."""
    return self["xsrc"]

@xsrc.setter
def xsrc(self, val):
    self["xsrc"] = val
@property
def y(self):
    """Sample data binned on the y axis (tuple, list, numpy array, or
    pandas Series)."""
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
@property
def yaxis(self):
    """Target 2D cartesian y axis: 'y' (default, `layout.yaxis`), 'y2'
    (`layout.yaxis2`), and so on — the string 'y' optionally followed by
    an integer >= 1."""
    return self["yaxis"]

@yaxis.setter
def yaxis(self, val):
    self["yaxis"] = val
@property
def ybins(self):
    """y-axis binning options.

    Accepts a :class:`plotly.graph_objs.histogram.YBins` instance or a
    dict of compatible properties.
    """
    return self["ybins"]

@ybins.setter
def ybins(self, val):
    self["ybins"] = val
@property
def ycalendar(self):
    """Calendar system for `y` date data: one of 'chinese', 'coptic',
    'discworld', 'ethiopian', 'gregorian', 'hebrew', 'islamic',
    'jalali', 'julian', 'mayan', 'nanakshahi', 'nepali', 'persian',
    'taiwan', 'thai', 'ummalqura'."""
    return self["ycalendar"]

@ycalendar.setter
def ycalendar(self, val):
    self["ycalendar"] = val
@property
def yhoverformat(self):
    """Hover-text format rule for `y` (string; numbers converted).

    Uses d3 formatting mini-languages: numbers per
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format, dates per
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format,
    plus two extensions — "%h" (half-year) and "%{n}f" (fractional
    seconds with n digits; e.g. *2016-10-13 09:15:23.456* with
    "%H~%M~%S.%2f" displays *09~15~23.46*). Defaults to
    `yaxis.hoverformat`.
    """
    return self["yhoverformat"]

@yhoverformat.setter
def yhoverformat(self, val):
    self["yhoverformat"] = val
@property
def ysrc(self):
    """Chart Studio Cloud source reference for `y` (string or
    plotly.grid_objs.Column)."""
    return self["ysrc"]

@ysrc.setter
def ysrc(self, val):
    self["ysrc"] = val
@property
def zorder(self):
    """Layer of this trace relative to other SVG traces on the same
    subplot (int); higher `zorder` draws in front of lower."""
    return self["zorder"]

@zorder.setter
def zorder(self, val):
    self["zorder"] = val
@property
def type(self):
    """Read-only trace type identifier, read straight from the private
    props mapping (no setter is defined)."""
    return self._props["type"]
@property
def _prop_descriptions(self):
return """\
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
bingroup
Set a group of histogram traces which will have
compatible bin settings. Note that traces on the same
subplot and with the same "orientation" under `barmode`
"stack", "relative" and "group" are forced into the
same bingroup, Using `bingroup`, traces under `barmode`
"overlay" and on different axes (of the same axis type)
can have compatible bin settings. Note that histogram
and histogram2d* trace can share the same `bingroup`
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
cumulative
:class:`plotly.graph_objects.histogram.Cumulative`
instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
error_x
:class:`plotly.graph_objects.histogram.ErrorX` instance
or dict with compatible properties
error_y
:class:`plotly.graph_objects.histogram.ErrorY` instance
or dict with compatible properties
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.histogram.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variable `binNumber` Anything contained in tag
`<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.histogram.Legendgrouptitle
` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.histogram.Marker` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of the each bar spans along the vertical
(horizontal).
outsidetextfont
Sets the font used for `text` lying outside the bar.
selected
:class:`plotly.graph_objects.histogram.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.histogram.Stream` instance
or dict with compatible properties
text
Sets hover text elements associated with each bar. If a
single string, the same string appears over all bars.
If an array of string, the items are mapped in order to
the this trace's coordinates.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the text font.
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `label` and `value`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.histogram.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbins
:class:`plotly.graph_objects.histogram.XBins` instance
or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybins
:class:`plotly.graph_objects.histogram.YBins` instance
or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
self,
arg=None,
alignmentgroup=None,
autobinx=None,
autobiny=None,
bingroup=None,
cliponaxis=None,
constraintext=None,
cumulative=None,
customdata=None,
customdatasrc=None,
error_x=None,
error_y=None,
histfunc=None,
histnorm=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextanchor=None,
insidetextfont=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
nbinsx=None,
nbinsy=None,
offsetgroup=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textposition=None,
textsrc=None,
texttemplate=None,
texttemplatefallback=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
x=None,
xaxis=None,
xbins=None,
xcalendar=None,
xhoverformat=None,
xsrc=None,
y=None,
yaxis=None,
ybins=None,
ycalendar=None,
yhoverformat=None,
ysrc=None,
zorder=None,
**kwargs,
):
"""
Construct a new Histogram object
The sample data from which statistics are computed is set in
`x` for vertically spanning histograms and in `y` for
horizontally spanning histograms. Binning options are set
`xbins` and `ybins` respectively if no aggregation data is
provided.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Histogram`
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
bingroup
Set a group of histogram traces which will have
compatible bin settings. Note that traces on the same
subplot and with the same "orientation" under `barmode`
"stack", "relative" and "group" are forced into the
same bingroup, Using `bingroup`, traces under `barmode`
"overlay" and on different axes (of the same axis type)
can have compatible bin settings. Note that histogram
and histogram2d* trace can share the same `bingroup`
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
cumulative
:class:`plotly.graph_objects.histogram.Cumulative`
instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
error_x
:class:`plotly.graph_objects.histogram.ErrorX` instance
or dict with compatible properties
error_y
:class:`plotly.graph_objects.histogram.ErrorY` instance
or dict with compatible properties
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.histogram.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variable `binNumber` Anything contained in tag
`<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.histogram.Legendgrouptitle
` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.histogram.Marker` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of the each bar spans along the vertical
(horizontal).
outsidetextfont
Sets the font used for `text` lying outside the bar.
selected
:class:`plotly.graph_objects.histogram.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.histogram.Stream` instance
or dict with compatible properties
text
Sets hover text elements associated with each bar. If a
single string, the same string appears over all bars.
If an array of string, the items are mapped in order to
the this trace's coordinates.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the text font.
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `label` and `value`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.histogram.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbins
:class:`plotly.graph_objects.histogram.XBins` instance
or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybins
:class:`plotly.graph_objects.histogram.YBins` instance
or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Histogram
"""
super().__init__("histogram")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Histogram
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Histogram`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("alignmentgroup", arg, alignmentgroup)
self._set_property("autobinx", arg, autobinx)
self._set_property("autobiny", arg, autobiny)
self._set_property("bingroup", arg, bingroup)
self._set_property("cliponaxis", arg, cliponaxis)
self._set_property("constraintext", arg, constraintext)
self._set_property("cumulative", arg, cumulative)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("error_x", arg, error_x)
self._set_property("error_y", arg, error_y)
self._set_property("histfunc", arg, histfunc)
self._set_property("histnorm", arg, histnorm)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("insidetextanchor", arg, insidetextanchor)
self._set_property("insidetextfont", arg, insidetextfont)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("nbinsx", arg, nbinsx)
self._set_property("nbinsy", arg, nbinsy)
self._set_property("offsetgroup", arg, offsetgroup)
self._set_property("opacity", arg, opacity)
self._set_property("orientation", arg, orientation)
self._set_property("outsidetextfont", arg, outsidetextfont)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textangle", arg, textangle)
self._set_property("textfont", arg, textfont)
self._set_property("textposition", arg, textposition)
self._set_property("textsrc", arg, textsrc)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._set_property("x", arg, x)
self._set_property("xaxis", arg, xaxis)
self._set_property("xbins", arg, xbins)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xsrc", arg, xsrc)
self._set_property("y", arg, y)
self._set_property("yaxis", arg, yaxis)
self._set_property("ybins", arg, ybins)
self._set_property("ycalendar", arg, ycalendar)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("ysrc", arg, ysrc)
self._set_property("zorder", arg, zorder)
self._props["type"] = "histogram"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Histogram |
python | ansible__ansible | test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py | {
"start": 539,
"end": 1214
} | class ____(ConnectionBase):
transport = 'delegation_connection'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
def _connect(self):
super(Connection, self)._connect()
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data, sudoable)
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
def close(self):
super(Connection, self).close()
| Connection |
python | google__pytype | pytype/overlays/classgen.py | {
"start": 1373,
"end": 6353
} | class ____(abstract.PyTDFunction, metaclass=abc.ABCMeta):
"""Base class for decorators that generate classes from data declarations."""
# Defaults for the args that we support (dataclasses only support 'init',
# but the others default to false so they should not affect anything).
DEFAULT_ARGS: ClassVar[dict[str, Any]] = {
"init": True,
"kw_only": False,
"auto_attribs": False,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Decorator.call() is invoked first with args, then with the class to
# decorate, so we need to first store the args and then associate them to
# the right class.
self._current_args = None
# Some constructors like attr.dataclass partially apply args, overriding the
# defaults attached to the class.
self.partial_args = {}
self.args = {} # map from each class we decorate to its args
@abc.abstractmethod
def decorate(self, node, cls):
"""Apply the decorator to cls."""
def get_initial_args(self):
ret = self.DEFAULT_ARGS.copy()
ret.update(self.partial_args)
return ret
def update_kwargs(self, args):
"""Update current_args with the Args passed to the decorator."""
self._current_args = self.get_initial_args()
for k, v in args.namedargs.items():
if k in self._current_args:
try:
self._current_args[k] = abstract_utils.get_atomic_python_constant(v)
except abstract_utils.ConversionError:
self.ctx.errorlog.not_supported_yet(
self.ctx.vm.frames, f"Non-constant argument to decorator: {k!r}"
)
def set_current_args(self, kwargs):
"""Set current_args when constructing a class directly."""
self._current_args = self.get_initial_args()
self._current_args.update(kwargs)
def init_name(self, attr):
"""Attribute name as an __init__ keyword, could differ from attr.name."""
return attr.name
def make_init(self, node, cls, attrs, init_method_name="__init__"):
pos_params = []
kwonly_params = []
all_kwonly = self.args[cls]["kw_only"]
for attr in attrs:
if not attr.init:
continue
typ = attr.init_type or attr.typ
# call self.init_name in case the name differs from the field name -
# e.g. attrs removes leading underscores from attrib names when
# generating kwargs for __init__.
param = Param(name=self.init_name(attr), typ=typ, default=attr.default)
# kw_only=False in a field does not override kw_only=True in the class.
if all_kwonly or attr.kw_only:
kwonly_params.append(param)
else:
pos_params.append(param)
# If the class has unknown bases or is dynamic, we can't know all possible
# fields, so accept arbitrary positional and keyword arguments.
has_unknown_fields = (
self.ctx.convert.unsolvable in cls.mro or cls.is_dynamic
)
return overlay_utils.make_method(
self.ctx,
node,
init_method_name,
pos_params,
posonly_count=0,
kwonly_params=kwonly_params,
varargs=Param("args") if has_unknown_fields else None,
kwargs=Param("kwargs") if has_unknown_fields else None,
)
def call(self, node, func, args, alias_map=None):
"""Construct a decorator, and call it on the class."""
args = args.simplify(node, self.ctx)
self.match_args(node, args)
# There are two ways to use a decorator:
# @decorator(...)
# class Foo: ...
# or
# @decorator
# class Foo: ...
# In the first case, call() is invoked twice: once with kwargs to create the
# decorator object and once with the decorated class as a posarg. So we call
# update_kwargs on the first invocation, setting _current_args, and skip it
# on the second.
# In the second case, we call update_kwargs on the first and only
# invocation.
if not self._current_args:
self.update_kwargs(args)
# NOTE: @dataclass is py3-only and has explicitly kwonly args in its
# constructor.
#
# @attr.s does not take positional arguments in typical usage, but
# technically this works:
# class Foo:
# x = attr.ib()
# Foo = attr.s(Foo, **kwargs)
#
# Unfortunately, it also works to pass kwargs as posargs; we will at least
# reject posargs if the first arg is not a Callable.
if not args.posargs:
return node, self.to_variable(node)
cls_var = args.posargs[0]
# We should only have a single binding here
(cls,) = cls_var.data
if not isinstance(cls, abstract.Class):
# There are other valid types like abstract.Unsolvable that we don't need
# to do anything with.
return node, cls_var
self.args[cls] = self._current_args
# Reset _current_args so we don't use old args for a new class.
self._current_args = None
# decorate() modifies the cls object in place
self.decorate(node, cls)
return node, cls_var
| Decorator |
python | pennersr__django-allauth | allauth/socialaccount/forms.py | {
"start": 221,
"end": 1205
} | class ____(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop("sociallogin")
initial = get_adapter().get_signup_form_initial_data(self.sociallogin)
kwargs.update(
{
"initial": initial,
"email_required": kwargs.get(
"email_required", app_settings.EMAIL_REQUIRED
),
}
)
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter()
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def validate_unique_email(self, value):
try:
return super(SignupForm, self).validate_unique_email(value)
except forms.ValidationError:
raise get_adapter().validation_error(
"email_taken", self.sociallogin.provider.name
)
| SignupForm |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_name03.py | {
"start": 315,
"end": 1710
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_name03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "line", "name": "New 1"})
chart2 = workbook.add_chart({"type": "line", "name": "New 2"})
chart1.axis_ids = [44271104, 45703168]
chart2.axis_ids = [80928128, 80934400]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart1.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart1.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart1.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart2.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart2.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart2.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart1)
worksheet.insert_chart("E24", chart2)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 22910,
"end": 23615
} | class ____(ReconnectMixin, MySQLDatabase):
def cursor(self, named_cursor=None):
cursor = super(ReconnectMySQLDatabase, self).cursor(named_cursor)
# The first (0th) query fails, as do all queries after the 2nd (1st).
if self._query_counter != 1:
def _fake_execute(self, *args):
raise OperationalError('2006')
cursor.execute = _fake_execute
self._query_counter += 1
return cursor
def close(self):
self._close_counter += 1
return super(ReconnectMySQLDatabase, self).close()
def _reset_mock(self):
self._close_counter = 0
self._query_counter = 0
@requires_mysql
| ReconnectMySQLDatabase |
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 1038,
"end": 2027
} | class ____:
def __init__(self, desc):
self.desc = desc
self.inv = []
self.gameOver = False
self.doors = [None, None, None, None]
def __getattr__(self, attr):
return {
"n": self.doors[0],
"s": self.doors[1],
"e": self.doors[2],
"w": self.doors[3],
}[attr]
def enter(self, player):
if self.gameOver:
player.gameOver = True
def add_item(self, it):
self.inv.append(it)
def remove_item(self, it):
self.inv.remove(it)
def describe(self):
print(self.desc)
visibleItems = [it for it in self.inv if it.isVisible]
if random.random() > 0.5:
if len(visibleItems) > 1:
is_form = "are"
else:
is_form = "is"
print(f"There {is_form} {enumerate_items(visibleItems)} here.")
else:
print(f"You see {enumerate_items(visibleItems)}.")
| Room |
python | getsentry__sentry | src/sentry/integrations/api/bases/doc_integrations.py | {
"start": 2097,
"end": 2262
} | class ____(StaffPermissionMixin, DocIntegrationsPermission):
"""Allows staff to to access doc integration endpoints."""
pass
| DocIntegrationsAndStaffPermission |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/frontier/tests.py | {
"start": 244,
"end": 2428
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = FrontierProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"email": "johndoe@example.com",
"customer_id": "1234567",
"firstname": "John",
"developer": false,
"lastname": "Doe",
"allowedDownloads": ["FORC-FDEV-D-1010", "FORC-FDEV-D-1012", "COMBAT_TUTORIAL_DEMO", "FORC-FDEV-D-1013", "PUBLIC_TEST_SERVER", "FORC_FDEV_V_ADDER_LRPO", "FORC_FDEV_V_CHALLENGER_LRPO", "FORC_FDEV_V_CHIEFTAIN_LRPO", "FORC_FDEV_V_CRUSADER_LRPO", "FORC_FDEV_V_ANACONDA_LRPO", "FORC_FDEV_V_ASP_LRPO", "FORC_FDEV_V_ASP_SCOUT_LRPO", "FORC_FDEV_V_BELUGA_LRPO", "FORC_FDEV_V_COBRA_MKIII_LRPO", "FORC_FDEV_V_DIAMOND_EXPLORER_LRPO", "FORC_FDEV_V_COBRA_MKIV_LRPO", "FORC_FDEV_V_DIAMOND_SCOUT_LRPO", "FORC_FDEV_V_DOLPHIN_LRPO", "FORC_FDEV_V_EAGLE_LRPO", "FORC_FDEV_V_FEDERAL_ASSAULT_LRPO", "FORC_FDEV_V_FEDERAL_CORVETTE_LRPO", "FORC_FDEV_V_FEDDROP_LRPO", "FORC_FDEV_V_FEDERAL_FIGHTER_LRPO", "FORC_FDEV_V_FEDERAL_GUNSHIP_LRPO", "FORC_FDEV_V_FERDELANCE_LRPO", "FORC_FDEV_V_HAULER_LRPO", "FORC_FDEV_V_CLIPPER_LRPO", "FORC_FDEV_V_IMPERIAL_COURIER_LRPO", "FORC_FDEV_V_IMPERIAL_CUTTER_LRPO", "FORC_FDEV_V_IMPERIAL_EAGLE_LRPO", "FORC_FDEV_V_IMPERIAL_FIGHTER_LRPO", "FORC_FDEV_V_KEELBACK_LRPO", "FORC_FDEV_V_KRAIT_LRPO", "FORC_FDEV_V_KRAIT_LITE_LRPO", "FORC_FDEV_V_MAMBA_LRPO", "FORC_FDEV_V_ORCA_LRPO", "FORC_FDEV_V_PYTHON_LRPO", "FORC_FDEV_V_SIDEWINDER_LRPO", "FORC_FDEV_V_TAIPAN_LRPO", "FORC_FDEV_V_MAMMOTH_LRPO", "FORC_FDEV_V_TYPE6_LRPO", "FORC_FDEV_V_TYPE7_LRPO", "FORC_FDEV_V_TYPE9_LRPO", "FORC_FDEV_V_VIPER_MKIII_LRPO", "FORC_FDEV_V_VIPER_MKIV_LRPO", "FORC_FDEV_V_VULTURE_LRPO", "FORC-FDEV-D-1022", "FORC_FDEV_V_DECAL_1091", "FORC_FDEV_V_DECAL_1100", "FORC_FDEV_V_DECAL_1149", "FORC_FDEV_V_DECAL_1150", "FORC_FDEV_V_DECAL_1151", "FORC_FDEV_V_DECAL_1176", "FORC_FDEV_V_DECAL_1177", "FORC-FDEV-DO-1000", "FORC-FDEV-DO-1003", "FORC-FDEV-DO-1006", "PUBLIC_TEST_SERVER_OD"],
"platform": "frontier"
}""",
)
def get_expected_to_str(self):
return "johndoe@example.com"
| FrontierTests |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 19063,
"end": 20373
} | class ____(SentryPermission):
scope_map = {
"GET": ("org:read", "org:integrations", "org:write", "org:admin"),
# Anyone logged in can increment the stats, so leave the scopes empty
# Note: this only works for session-based auth so you cannot increment stats through API
"POST": (),
}
def has_object_permission(self, request: Request, view, sentry_app: SentryApp | RpcSentryApp):
if not hasattr(request, "user") or not request.user:
return False
owner_app = organization_service.get_organization_by_id(
id=sentry_app.owner_id, user_id=request.user.id
)
if owner_app is None:
logger.error(
"sentry_app_stats.permission_org_not_found",
extra={
"sentry_app_id": sentry_app.id,
"owner_org_id": sentry_app.owner_id,
"user_id": request.user.id,
},
)
return False
self.determine_access(request, owner_app)
if is_active_superuser(request):
return True
assert request.method, "method must be present in request to get permissions"
return ensure_scoped_permission(request, self.scope_map.get(request.method))
| SentryAppStatsPermission |
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 12798,
"end": 13452
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
A transformer that takes the maximum of all elements in a row.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
return np.expand_dims(np.amax(X,1),1)
| MaxTransformer |
python | google__pytype | pytype/pattern_matching.py | {
"start": 9249,
"end": 16775
} | class ____:
"""Track exhaustiveness in pattern matches."""
def __init__(self, ast_matches, ctx):
self.matches = _Matches(ast_matches)
self._option_tracker: dict[int, dict[int, _OptionTracker]] = (
collections.defaultdict(dict)
)
self._match_types: dict[int, set[_MatchTypes]] = collections.defaultdict(
set
)
self._active_ends = set()
self.ctx = ctx
def _get_option_tracker(
self, match_var: cfg.Variable, match_line: int
) -> _OptionTracker:
"""Get the option tracker for a match line."""
if (
match_line not in self._option_tracker
or match_var.id not in self._option_tracker[match_line]
):
self._option_tracker[match_line][match_var.id] = _OptionTracker(
match_var, self.ctx
)
self._active_ends.add(self.matches.start_to_end[match_line])
return self._option_tracker[match_line][match_var.id]
def _make_instance_for_match(self, node, types):
"""Instantiate a type for match case narrowing."""
# This specifically handles the case where we match against an
# AnnotationContainer in MATCH_CLASS, and need to replace it with its base
# class when narrowing the matched variable.
ret = []
for v in types:
cls = v.base_cls if isinstance(v, abstract.AnnotationContainer) else v
if not isinstance(cls, (abstract.Class, abstract.AMBIGUOUS)):
self.ctx.errorlog.bad_class_match(self.ctx.vm.frames, cls)
return self.ctx.new_unsolvable(node)
ret.append(self.ctx.vm.init_class(node, cls))
return self.ctx.join_variables(node, ret)
def _register_case_branch(self, op: opcodes.Opcode) -> int | None:
match_line = self.matches.match_cases.get(op.line)
if match_line is None:
return None
self.matches.register_case(match_line, op.line)
return match_line
def instantiate_case_var(self, op, match_var, node):
match_line = self.matches.match_cases[op.line]
tracker = self._get_option_tracker(match_var, match_line)
if tracker.cases[op.line]:
# We have matched on one or more classes in this case.
types = [x.typ for x in tracker.cases[op.line]]
return self._make_instance_for_match(node, types)
else:
# We have not matched on a type, just bound the current match var to a
# variable.
return tracker.get_narrowed_match_var(node)
def get_current_type_tracker(
self, op: opcodes.Opcode, match_var: cfg.Variable
):
line = self.get_current_match(op)
return self._option_tracker[line].get(match_var.id)
def get_current_type_trackers(self, op: opcodes.Opcode):
line = self.get_current_match(op)
return list(self._option_tracker[line].values())
def get_current_match(self, op: opcodes.Opcode):
match_line = self.matches.match_cases[op.line]
return match_line
def is_current_as_name(self, op: opcodes.Opcode, name: str):
if op.line not in self.matches.match_cases:
return None
return self.matches.as_names.get(op.line) == name
def register_match_type(self, op: opcodes.Opcode):
if op.line not in self.matches.match_cases:
return
match_line = self.matches.match_cases[op.line]
self._match_types[match_line].add(_MatchTypes.make(op))
def add_none_branch(self, op: opcodes.Opcode, match_var: cfg.Variable):
match_line = self._register_case_branch(op)
if not match_line:
return None
tracker = self._get_option_tracker(match_var, match_line)
tracker.cover_from_none(op.line)
if not tracker.is_complete:
return None
else:
# This is the last remaining case, and will always succeed.
return True
def add_cmp_branch(
self,
op: opcodes.OpcodeWithArg,
cmp_type: int,
match_var: cfg.Variable,
case_var: cfg.Variable,
) -> _MatchSuccessType:
"""Add a compare-based match case branch to the tracker."""
match_line = self._register_case_branch(op)
if not match_line:
return None
if cmp_type not in (slots.CMP_EQ, slots.CMP_IS):
return None
match_type = self._match_types[match_line]
try:
case_val = abstract_utils.get_atomic_value(case_var)
except abstract_utils.ConversionError:
return None
# If this is part of a case statement and the match includes class matching,
# check if we need to include the compared value as a type case.
# (We need to do this whether or not the match_var has a concrete value
# because even an ambigious cmp match will require the type to be set within
# the case branch).
op = cast(opcodes.OpcodeWithArg, op)
if op.line not in self.matches.match_cases:
return None
tracker = self.get_current_type_tracker(op, match_var)
# If we are not part of a class match, check if we have an exhaustive match
# (enum or union of literals) that we are tracking.
if not tracker:
if _is_literal_match(match_var) or _is_enum_match(match_var, case_val):
tracker = self._get_option_tracker(match_var, match_line)
# If none of the above apply we cannot do any sort of tracking.
if not tracker:
return None
ret = tracker.cover_from_cmp(op.line, case_var)
if match_type != {_MatchTypes.CMP}:
# We only do exhaustiveness tracking for pure CMP matches
tracker.invalidate()
return None
elif tracker.is_complete:
# This is the last remaining case, and will always succeed.
return True
elif ret:
return None
else:
return False
def add_class_branch(
self, op: opcodes.Opcode, match_var: cfg.Variable, case_var: cfg.Variable
) -> _MatchSuccessType:
"""Add a class-based match case branch to the tracker."""
match_line = self._register_case_branch(op)
if not match_line:
return None
tracker = self._get_option_tracker(match_var, match_line)
tracker.cover(op.line, case_var)
return tracker.is_complete or None
def add_default_branch(self, op: opcodes.Opcode) -> _MatchSuccessType:
"""Add a default match case branch to the tracker."""
match_line = self._register_case_branch(op)
if not match_line or match_line not in self._option_tracker:
return None
for opt in self._option_tracker[match_line].values():
# We no longer check for exhaustive or redundant matches once we hit a
# default case.
opt.invalidate()
return True
def check_ending(
self, op: opcodes.Opcode, implicit_return: bool = False
) -> list[IncompleteMatch]:
"""Check if we have ended a match statement with leftover cases."""
line = op.line
if implicit_return:
done = set()
if line in self.matches.match_cases:
start = self.matches.match_cases[line]
end = self.matches.start_to_end[start]
if end in self._active_ends:
done.add(end)
else:
done = {i for i in self._active_ends if line > i}
ret = []
for i in done:
for start in self.matches.end_to_starts[i]:
if self.matches.unseen_cases[start]:
# We have executed some opcode out of order and thus gone past the end
# of the match block before seeing all case branches.
continue
trackers = self._option_tracker[start]
for tracker in trackers.values():
if tracker.is_valid:
for o in tracker.options:
if not o.is_empty and not o.indefinite:
ret.append(IncompleteMatch(start, o.values))
self._active_ends -= done
return ret
| BranchTracker |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/assertsql.py | {
"start": 12134,
"end": 12801
} | class ____(AssertRule):
def __init__(self, *rules):
self.rules = set(rules)
def process_statement(self, execute_observed):
for rule in list(self.rules):
rule.errormessage = None
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.discard(rule)
if not self.rules:
self.is_consumed = True
break
elif not rule.errormessage:
# rule is not done yet
self.errormessage = None
break
else:
self.errormessage = list(self.rules)[0].errormessage
| AllOf |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/graph_capture_wrappers.py | {
"start": 22585,
"end": 62341
} | class ____:
mc_data: int
mc_storage: int
mc_inductor_storage_resized: int
T = TypeVar("T")
def sc_visit(
t, fn: Callable[[Tensor], T], reduce_fn: Callable[[T, T], T], accum_init: T
) -> T:
if not is_traceable_wrapper_subclass(t):
return fn(t)
accum = accum_init
def visit(e):
if not is_traceable_wrapper_subclass(e):
nonlocal accum
accum = reduce_fn(accum, fn(e))
return
for a in e.__tensor_flatten__()[0]:
visit(getattr(e, a))
visit(t)
return accum
def _get_mutation_counter(t) -> int:
return sc_visit(
t,
lambda t: torch._functionalize_mutation_counter(t.elem), # type: ignore[attr-defined]
lambda l, r: max(l, r),
-1,
)
def _get_storage_changed_counter(t) -> int:
return sc_visit(
t,
lambda t: torch._functionalize_storage_changed_counter(t.elem), # type: ignore[attr-defined]
lambda l, r: max(l, r),
-1,
)
def _get_inductor_storage_resized_counter(t) -> int:
return sc_visit(
t,
lambda t: torch._functionalize_inductor_storage_resized_counter(t.elem), # type: ignore[attr-defined]
lambda l, r: max(l, r),
-1,
)
def _get_mutation_counters(t) -> MutationCounters:
return MutationCounters(
_get_mutation_counter(t),
_get_storage_changed_counter(t),
_get_inductor_storage_resized_counter(t),
)
def apply_in_graph_mutations(
input_info,
inpt_old,
inpt_new,
f_inpt,
input_idx,
mcs: Optional[MutationCounters] = None,
applied_mcs: Optional[MutationCounters] = None,
):
assert input_info.mutation_type == MutationType.MUTATED_IN_GRAPH
# See Note [set_() Input Mutations in AOTAutograd]
# all mutations on the input must be under no_grad, so it is safe to put in the graph
# Here, we're saying that if an input experienced a set call, inp.set_(other),
# then we can effectively not have to worry about whether its data was mutated.
# There are 3 cases:
# (1) We mutate inp *after* the set_() call. other is a graph intermediate.
# In this case, we're not really mutating the input storage of "inp";
# we're mutating the storage of an intermdiate value (other),
# and slamming that storage into the input tensor. So no data mutation is necessary.
# (2) We mutate inp *after* the set_() call. other is a graph *input*.
# In this case, the data mutation will be properly handled in the runtime
# epilogue during the processing of "other"
# (3) We mutate inp *before* the set_() call.
# This case is *not* currently handled.
if input_info.mutates_storage_metadata:
if mcs is None or mcs.mc_storage > applied_mcs.mc_storage: # type: ignore[union-attr]
with torch.no_grad():
inpt_old.set_(inpt_new)
# Note [Ordering of resize_() and set_()]
# Importantly: the common usage in FSDP is that we have a dummy parameter
# that sees a set_() and **Then** a resize_().
# We must put those mutations into the graph in the same order,
# Since running them in the opposite order will have different behavior.
# We fully ban resize_() followed by set_() for now, although in principal
# we could support this
if input_info.mutation_inductor_storage_resize:
if (
mcs is None
or mcs.mc_inductor_storage_resized > applied_mcs.mc_inductor_storage_resized # type: ignore[union-attr]
):
# resizing is not supported on subclasses (we error earlier if this happens)
from torch._subclasses.functional_tensor import FunctionalTensor
assert isinstance(f_inpt, FunctionalTensor)
old_storage_size = torch._functionalize_get_storage_size( # type: ignore[attr-defined]
f_inpt.elem, before=True
)
new_storage_size = torch._functionalize_get_storage_size( # type: ignore[attr-defined]
f_inpt.elem, before=False
)
if old_storage_size != new_storage_size:
assert old_storage_size == 0 or new_storage_size == 0, f"""\
Encosize during tracing on input {input_idx}. Old nbytes={old_storage_size}, new nbytes={new_storage_size}
We oresizing on graph inputs as long as the input either starts or ends with a storage size of 0
(thee for FSDP)"""
torch.ops.inductor.resize_storage_bytes_(inpt_old, new_storage_size)
if new_storage_size == 0:
# Even if we marked the input as having a data mutation (thus needing a copy_()),
# We should **ignore** it if our input has no storage
# (this can happen if, e.g. we temporarily resize our input, copy data into it,
# and resize it back down to zero)
return
# Optimization: if the copy_() is a no-op then don't include it in the graph.
# In theory inductor could optimize this away, however in fsdp, we end up with
# param.copy_(param), where param is a zero-storage-size tensor,
# and running this op in eager mode (using the aot_eager backend) will result in a segfault.
# So we may as well optimize it away here.
if inpt_old is inpt_new:
# (This check needs to be done after putting resize_() in the graph,
# since a resize_(0) doesn't actually change the FunctionalTensor's inner tensor)
return
# We found an input that had a (data-only) mutation.
# Since keep_input_mutations is set, we need to faithfully apply a copy_()
# so the compiler will see the input mutation in the graph.
if not input_info.mutates_data:
return
if mcs is not None and mcs.mc_data <= applied_mcs.mc_data: # type: ignore[union-attr]
return
if input_info.mutations_hidden_from_autograd:
# Hidden from autograd = run under no_grad, **and** don't bump VC
# (although if the tensor was created in inference mode, it has no VC)
if inpt_old.is_inference():
maybe_preserve_vc = nullcontext()
else:
maybe_preserve_vc = torch.autograd._unsafe_preserve_version_counter(
inpt_old # type: ignore[assignment]
)
with torch.no_grad(), maybe_preserve_vc:
inpt_old.copy_(inpt_new)
elif input_info.mutations_under_no_grad_or_inference_mode:
# Under no_grad = run under no_grad (we still bump the VC though)
# (inference_mode will also bump the VC, as long as the tensor in question
# was created outside of inference_mode)
with torch.no_grad():
inpt_old.copy_(inpt_new)
else:
inpt_old.copy_(inpt_new)
# This creates the final function that we want to trace using make_fx(),
# in both aot_dispatch_autograd and aot_dispatch_base.
# Preconditions:
# - fn corresponds to the user's fw function
# - fn arguments have been flattened, duplicate arguments have been handled
# - In the returned function, the "primals" arguments *includes* synthetic bases.
# This function does the work of functionalizing the input function,
# and performing copy_() calls at the end of the function if `keep_input_mutations` is set.
# The function returned has signature that is either:
# (1) "traced_fn(primals: List[Any])" if trace_joint is False
# (2) "traced_fn(primals: List[Any], tangents: List[Any])" if trace_joint is True
# Returns a new (functionalized) function, and updated arguments to call it with.
def create_functionalized_fn(
fn,
args,
args_descs,
*,
meta: ViewAndMutationMeta,
aot_config: AOTConfig,
trace_joint: bool,
joint_fn_handle: Optional[JointFnHandle] = None,
) -> Any:
primals_after_forward = None
f_args_after_forward = None
f_args_mutation_counters_after_forward: Optional[list[MutationCounters]] = None
inputs_mutated_in_graph = [
info.mutation_type == MutationType.MUTATED_IN_GRAPH for info in meta.input_info
]
has_input_mutated_in_graph = any(inputs_mutated_in_graph)
@simple_wraps(fn)
def _functionalized_f_helper(
*args: list[FxValue],
) -> tuple[tuple[list[FxValue], list[Tensor]], list[Optional[AOTOutput]]]:
with maybe_enable_thunkify():
# See Note [Disabling Functionalize TLS Above Python Functionalization]
disable_above = torch._C._ExcludeDispatchKeyGuard(
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
)
with disable_above:
# The functionalization code here can potentially trigger traces
# into the graph, but we'd prefer to NOT do this, because if we
# trace them now, we will end up with FX nodes that don't have
# module stack annotations, which makes unflattener unhappy.
# Wrap inputs into functional wrappers
f_args = pytree.tree_map(to_fun, args)
if trace_joint and has_input_mutated_in_graph and joint_fn_handle:
# TODO(ivankobzarev): Support fw and bw mutations for subclasses
def _post_forward(primals):
nonlocal primals_after_forward
primals_after_forward = pytree.tree_map(from_fun, primals)
nonlocal f_args_after_forward
f_args_after_forward = f_args[0]
nonlocal f_args_mutation_counters_after_forward
f_args_mutation_counters_after_forward = [
MutationCounters(-1, -1, -1)
if not inputs_mutated_in_graph[i]
else _get_mutation_counters(f_arg)
for i, f_arg in enumerate(f_args_after_forward)
]
joint_fn_handle.post_forward = _post_forward
# Run the joint
f_outs, f_outs_descs = call_and_expect_output_descs(fn, f_args)
if trace_joint:
# We support a limited amount of mutation of graph inputs during the backward pass.
# (This is used e.g. by Float8, which needs to update buffers during the backward pass)
# Here, we perform extra checks for primals that were mutated in the **backward**
# We're doing the checks here instead of doing them with the rest of the input mutation handling because:
# - We need to detect inputs that were mutated in the backward **separately** from mutations that happened
# during the forward, because the handling is different: some input mutations from the forward
# can be only handled in a fw-only runtime epilogue, and in theory if we wanted to handle those same
# types of mutations in the backward we would need a bw-only runtime epilogue.
# - We could in theory have our analysis pass differentiate mutations in the fw from mutations in
# the bw by running our analysis first on the fw-only graph, and then on the joint graph. This would
# require an extra round of tracing though, so it's more efficient to do in-line here.
assert (
isinstance(args, tuple)
and len(args) == 2
and isinstance(args[0], (list, tuple))
)
# Only look at mutations that happened to forward inputs (e.g. fw buffers that were saved for bw)
primals_before = args[0]
primals_after = pytree.tree_map(from_fun, f_args[0])
for idx, (f_inpt, before, after, inpt_info) in enumerate(
zip(f_args[0], primals_before, primals_after, meta.input_info)
):
# Store information about mutations in joint(for backward analysis)
joint_mutates_data = has_data_mutation(f_inpt)
joint_mutates_metadata = has_metadata_mutation(
f_inpt, before, check_only_storage_mutation=False
)
# Ban metadata mutations on fw inputs during the bw
if not inpt_info.mutates_metadata:
assert not joint_mutates_metadata, (
"Found a graph input that had its metadata mutated in the backward. This is not supported"
)
# Ban storage resizing on fw inputs during the bw
if not inpt_info.mutation_inductor_storage_resize:
assert not was_inductor_storage_resized(f_inpt), (
"Found a graph input that had storage resizing in the backward. This is not supported"
)
# Allow data mutations on fw inputs during the bw, but only if they do not require grad
# So we can guarantee that we can keep the mutations in the graph
if (
joint_mutates_data
and not inpt_info.mutates_data
and not inpt_info.mutates_storage_metadata
):
# Not banning here mutations on inpt_info.requires_grad -
# we'll check at runtime and fail only when backward is under torch.is_grad_enabled (create_graph)
# Add node meta for copy_ for partitioner that this node should be in backward graph.
with (
torch.fx.traceback.preserve_node_meta(),
set_partitioner_tag_must_be_in_backward(),
):
# before and after should be tensors if we're calling copy_ on them
assert isinstance(before, torch.Tensor) and isinstance(
after, torch.Tensor
)
before.copy_(after)
meta.indices_of_inputs_that_requires_grad_with_mutations_in_bw.append(
idx
)
# Now that we covered mutations to *forward* inputs during the backward,
# we also need to cover mutations to *backward-only* inputs during the backward (e.g. mutation to a grad_out).
# Today, we will just error in all cases of this happening unless someone needs us to support it.
tangents_before = args[1]
tangents_after = pytree.tree_map(from_fun, f_args[1])
for f_inpt, before, after in zip(
f_args[1], tangents_before, tangents_after
):
assert not has_metadata_mutation(
f_inpt, before, check_only_storage_mutation=False
), (
"Found an input to the backward that had metadata mutated during the backward pass. This is not supported"
)
if has_data_mutation(f_inpt):
can_be_in_graph = _check_if_mutation_can_be_in_graph(
keep_input_mutations=True,
mutates_data=True,
mutates_metadata=False,
mutations_hidden_from_autograd=are_all_mutations_hidden_from_autograd(
f_inpt
),
mutations_under_no_grad_or_inference_mode=are_all_mutations_under_no_grad_or_inference_mode(
f_inpt
),
mutates_storage_metadata=False,
mutation_inductor_storage_resize=was_inductor_storage_resized(
f_inpt
),
requires_grad=f_inpt.requires_grad,
)
assert can_be_in_graph, (
"a backward input that had data mutated in an autograd-aware way. This is not supported"
)
# Perform the input mutation
with torch.fx.traceback.preserve_node_meta():
# before and after should be tensors if we're calling copy_ on them
assert isinstance(before, torch.Tensor) and isinstance(
after, torch.Tensor
)
before.copy_(after)
if aot_config.keep_inference_input_mutations:
# Note: This is a bit annoying. There's a layering issue here, where:
# (1) functionalization needs to operate on **synthetic base** inputs, before unpacking them into the "real" inputs.
# (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs.
# However, we **only** want to support this for inputs that have data-only (and no metadata) mutations,
# because inductor (and backends in generally) would prefer not to see these (e.g. as_strided_(), resize_()).
# This makes it pretty difficult for this logic to operate on synthetic bases.
# (3) In addition, there are cases where it's significantly cheaper to perform the copy on the individual
# (unpacked) input aliases, instead of the synthetic base.
# Example case where (3) could be important:
#
# def f(x, y):
# x.mul_(2)
# y.mul_(3)
# return x, y
# a = torch.ones(1'000'000)
# x, y = out(a[0:9], a[1:10])
#
# It would be much better to add copy_() calls into the graph for the two tiny slices, instead of materializing
# a giant "updated synthetic base" and copying into a's entire storage.
#
# For now, we are pessimistically not performing the optimization from (3);
# we will materialize an "updated" synthetic base, and copy it back to the synthetic input base.
# This allows us to factor aot autograd much more nicely, since only one area of the code needs to worry
# about synthetic bases.
# Apply in graph forward mutations only in joint case.
# Note: Mutations of primals in forward AND backward.
# If we have mutations of the same input in forward and in backward,
# we can not fuse them into one copy_ node. As in this case partitioner will put it
# either in forward or in backward. This will lead to incorrect state
# after forward and before backward.
# We have to emit two copy_ nodes, marking with additional meta each node,
# if it must be in forward or backward.
# We memorize mutation counter of the inputs after forward.
# Based on this after joint graph we check if backward also mutated input or not.
# We emit copy_ only in the end of joint tracing, to provide invariant for joint
# graph passes, that our graph is functional, except only some number of copy_ nodes
# in the end.
mcs_applied: list[MutationCounters] = [MutationCounters(0, 0, 0)] * len(
meta.input_info
)
if f_args_mutation_counters_after_forward is not None:
primals_before = args[0]
for idx, (f_inpt, before, after, inpt_info) in enumerate(
zip(
f_args_after_forward, # type: ignore[arg-type]
primals_before, # type: ignore[arg-type]
primals_after_forward, # type: ignore[arg-type]
meta.input_info,
)
):
if inpt_info.mutation_type != MutationType.MUTATED_IN_GRAPH:
continue
mcs_after_forward = f_args_mutation_counters_after_forward[idx]
with (
torch.fx.traceback.preserve_node_meta(),
set_partitioner_tag_must_be_in_forward(),
_proxy_tensor_disable_update_tensor_tracker(),
):
apply_in_graph_mutations(
inpt_info,
before,
after,
f_inpt,
idx,
mcs_after_forward,
mcs_applied[idx],
)
mcs_applied[idx] = mcs_after_forward
for idx, (inpt_old, f_inpt) in enumerate(
zip(args, f_args) if not trace_joint else zip(args[0], f_args[0]) # type: ignore[arg-type]
):
if not isinstance(f_inpt, torch.Tensor):
continue
assert is_fun(f_inpt)
inpt_new = from_fun(f_inpt)
if (
meta.input_info[idx].mutation_type
!= MutationType.MUTATED_IN_GRAPH
):
continue
mcs: Optional[MutationCounters] = None
if f_args_mutation_counters_after_forward is not None:
# This could happen for subclasses tracing
# Subclasses support for mutations in fw and bw is TBD.
mcs = _get_mutation_counters(f_inpt)
if mcs == mcs_applied[idx]:
# No mutation in backward; mutation was already applied.
continue
with (
torch.fx.traceback.preserve_node_meta(),
set_partitioner_tag_must_be_in_backward(),
):
apply_in_graph_mutations(
meta.input_info[idx],
inpt_old,
inpt_new,
f_inpt,
idx,
mcs,
mcs_applied[idx],
)
# When an output tensor is a functionalized mutated input, and we
# were able to move the mutation in to the graph then we can return
# the mutated input directly. This prevents duplicating the
# tensors contents.
flat_outs, outs_spec = pytree.tree_flatten(f_outs)
flat_outs = [from_fun(o) for o in flat_outs]
num_outs = len(meta.output_info)
for i in range(num_outs):
info = meta.output_info[i]
if info.output_type != OutputType.is_input:
continue
assert info.base_idx is not None
if (
meta.input_info[info.base_idx].mutation_type
== MutationType.MUTATED_IN_GRAPH
):
fw_args = args[0] if trace_joint else args
flat_outs[i] = fw_args[info.base_idx]
return pytree.tree_unflatten(flat_outs, outs_spec), f_outs_descs
return pytree.tree_map(from_fun, f_outs), f_outs_descs
# Kinda annoying, but needed to make sure that the fx graph we trace out has "primals"
# and "tangents" as its input names (which are special-cased by the partitioner)
# TODO (tmanlaibaatar) revisit this if we ever need to turn on non-strict joint graph export
def joint_helper(primals, tangents):
return _functionalized_f_helper(primals, tangents)
helper = joint_helper if trace_joint else _functionalized_f_helper
if config.functionalize_rng_ops:
# Setup the wrapper for functionalization of rng ops
helper, args, args_descs = create_functionalized_rng_ops_wrapper(
helper, args, args_descs, trace_joint
)
return helper, args, args_descs
def handle_effect_tokens_fn(
fn,
args,
args_descs: list[AOTInput],
*,
meta: ViewAndMutationMeta,
trace_joint: bool,
) -> Any:
num_tokens = len(meta.tokens)
@simple_wraps(fn)
def inner_fn(*args):
# See Note [Disabling Functionalize TLS Above Python Functionalization]
disable_above = torch._C._ExcludeDispatchKeyGuard(
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
)
with disable_above:
# See Note [Side-Effectful Tokens in AOTAutograd]
if trace_joint:
assert isinstance(args, tuple) and isinstance(args[0], (list, tuple))
tokens = args[0][:num_tokens]
assert all(token.numel() == 0 for token in tokens)
args = (args[0][num_tokens:], *args[1:])
else:
tokens = args[:num_tokens]
assert all(token.numel() == 0 for token in tokens)
args = args[num_tokens:]
# Populate the current FunctionalTensorMode with the tokens per
# operator. See Note [FunctionalTensorMode is Stateful]
functional_tensor_mode = torch.utils._python_dispatch._detect_infra_mode(
torch._C._TorchDispatchModeKey.FUNCTIONAL
)
assert functional_tensor_mode is not None
f_tokens = pytree.tree_map(to_fun, tokens)
for i, k in enumerate(meta.tokens.keys()):
functional_tensor_mode._tokens[k] = f_tokens[i]
# Run the joint
outs, outs_descs = call_and_expect_output_descs(fn, args)
# Return both the tokens and the outputs
# See Note [Side-Effectful Tokens in AOTAutograd]
if trace_joint:
assert len(outs) == 2
assert len(functional_tensor_mode._tokens_forward_output) == num_tokens
fwd_out_tokens = functional_tensor_mode._tokens_forward_output.values()
bwd_out_tokens = functional_tensor_mode._tokens.values()
f_fwd_out_tokens = [from_fun(t) for t in fwd_out_tokens]
f_bwd_out_tokens = [from_fun(t) for t in bwd_out_tokens]
f_fwd_out_tokens_descs = [
ForwardTokenAOTOutput(i) for i in range(len(fwd_out_tokens))
]
f_bwd_out_tokens_descs = [
BackwardTokenAOTOutput(i) for i in range(len(bwd_out_tokens))
]
meta.num_backward_tokens = len(bwd_out_tokens)
return (
((*f_fwd_out_tokens, *outs[0]), (*outs[1], *f_bwd_out_tokens)),
(
(*f_fwd_out_tokens_descs, *outs_descs[0]),
(*outs_descs[1], *f_bwd_out_tokens_descs),
),
)
out_tokens = [from_fun(t) for t in functional_tensor_mode._tokens.values()]
# TODO: can probably do a little more resolution here
out_tokens_descs = [
ForwardTokenAOTOutput(i)
for i in range(len(functional_tensor_mode._tokens.values()))
]
return ((*out_tokens, *outs), (*out_tokens_descs, *outs_descs))
# Additionally pass in tokens as inputs
# See Note [Side-Effectful Tokens in AOTAutograd]
additional_fwd_token_inputs = [torch.tensor([])] * num_tokens
additional_fwd_token_inputs_descs = [
ForwardTokenAOTInput(i) for i in range(num_tokens)
]
if trace_joint:
args = ([*additional_fwd_token_inputs, *args[0]], *args[1:])
args_descs = ( # type: ignore[assignment]
[*additional_fwd_token_inputs_descs, *args_descs[0]], # type: ignore[misc]
*args_descs[1:],
)
else:
args = [*additional_fwd_token_inputs, *args]
args_descs = [*additional_fwd_token_inputs_descs, *args_descs]
return inner_fn, args, args_descs
# Given a function operating on Subclass -> Subclass, returns an function that operates on Tensor -> Tensor
# Also returns:
# - the new set of arguments to pass into this function (now that tensor subclasses have been eliminated)
# - the updated ViewAndMutationMeta for this dense -> dense function.
# The other important arguments are:
# - flat_fn_maybe_joint: when is_joint_structure=True, this is the joint fw-bw function.
# when is_joint_structure=False, this is just the forward function.
# - fw_only: this is *always* the forward-only function.
# Why do we need this? We need to collect updated ViewAndMutationMeta on our new dense -> dense functions.
# In particular, we need this to tell the partitioner how many dense forward outputs there are.
def aot_dispatch_subclass(
flat_fn_maybe_joint: Union[JointTraceFn, TraceFn],
args: Union[list[FxValue], tuple[list[FxValue], list[FxValue]]],
args_descs: Union[list[AOTInput], tuple[list[AOTInput], list[AOTInput]]],
*,
is_joint_structure: bool,
meta: ViewAndMutationMeta,
fw_only: Callable,
) -> SubclassTracingInfo:
# Skip logic if we don't need to trace through any subclasses
req_subclass_dispatch = requires_subclass_dispatch(args, meta)
if not req_subclass_dispatch:
return SubclassTracingInfo(
plain_tensor_trace_fn=flat_fn_maybe_joint,
plain_tensor_args=args,
plain_tensor_args_descs=args_descs,
maybe_subclass_meta=None,
)
# TODO: add subclass guards (later PR).
# What's going on here? We need to compute subclass metadata about the outputs of the joint (grad_inputs).
# Annoying: we don't know the grad input metas until we're in the middle of tracing the joint,
# so we set it later, while we're tracing the joint (see inner_fn() below).
# Another option would be to run our run_functionalized_fw_and_collect_metadata() function
# directly on the joint, but this would hurt compile time (adding yet another pass through the joint).
subclass_meta = SubclassMeta()
# NB: doesn't take descs, this is going from the NEW flat_args to the
# subclasses, we don't need to do bookkeeping here
def inner_fn(fn, args, *, use_trace_joint: bool):
# Step 1: wrap tensor inputs into subclasses if necessary
all_args = wrap_tensor_subclasses_maybe_joint(
args, is_joint_structure=use_trace_joint, meta=meta
)
# Step 2: call the inner function, with our (maybe subclass) inputs
wrapped_outs, wrapped_outs_descs = call_and_expect_output_descs(fn, all_args)
if use_trace_joint:
# See Note: [Computing Subclass Metadata about grad_inputs]
# We also stash subclass info on our grad_inputs, if we're tracing the joint.
nonlocal subclass_meta
assert isinstance(wrapped_outs, tuple) and len(wrapped_outs) == 2, (
wrapped_outs,
wrapped_outs_descs,
)
# Don't need fw outs since we already have subclass metadata on them
grad_inputs = wrapped_outs[1]
subclass_meta.grad_input_metas = create_subclass_meta(grad_inputs)
# Add extra symints as outputs to the forward/backward graphs
# ignore nested ints here
forward_outs, forward_outs_descs = unwrap_tensor_subclasses(
wrapped_outs[0], wrapped_outs_descs[0], append_symints=True
)
# ignore nested ints here
backward_outs, backward_outs_descs = unwrap_tensor_subclasses(
wrapped_outs[1], wrapped_outs_descs[1], append_symints=True
)
return (
(forward_outs, backward_outs),
(forward_outs_descs, backward_outs_descs),
)
# Step 3: Unwrap any subclass outputs back into dense tensors
return unwrap_tensor_subclasses(
wrapped_outs, wrapped_outs_descs, append_symints=True
)
def joint_fn(
primals: list[FxValue], tangents: list[FxValue]
) -> tuple[
tuple[list[FxValue], list[FxValue]], tuple[list[AOTOutput], list[AOTOutput]]
]:
with maybe_enable_thunkify():
return inner_fn(
flat_fn_maybe_joint, (primals, tangents), use_trace_joint=True
)
def fw_fn(*primals: FxValue) -> tuple[list[FxValue], list[AOTOutput]]:
with maybe_enable_thunkify():
return inner_fn(flat_fn_maybe_joint, primals, use_trace_joint=False)
def metadata_fn(*primals: FxValue) -> tuple[list[FxValue], list[AOTOutput]]:
@simple_wraps(fw_only)
def inner_fw_only(*args):
return call_and_expect_output_descs(fw_only, args)
return inner_fn(inner_fw_only, primals, use_trace_joint=False)
if is_joint_structure:
# Add extra symints (size/strides) as input to the forward graph
primals_unwrapped_pair = unwrap_tensor_subclasses(
args[0], # type: ignore[arg-type]
args_descs[0], # type: ignore[arg-type]
append_symints=True,
)
# We pass append_symints=False here because the partitioner will
# capture and add any extra argument
tangents_unwrapped_pair = unwrap_tensor_subclasses(
args[1], # type: ignore[arg-type]
args_descs[1], # type: ignore[arg-type]
append_symints=False,
)
args_unwrapped = (primals_unwrapped_pair[0], tangents_unwrapped_pair[0])
args_descs_unwrapped = (primals_unwrapped_pair[1], tangents_unwrapped_pair[1])
remapped_static_indices = remap_unwrapped_subclass_arg_indices(
args[0], meta.static_input_indices
)
else:
args_unwrapped, args_descs_unwrapped = unwrap_tensor_subclasses( # type: ignore[assignment]
args, # type: ignore[arg-type]
args_descs, # type: ignore[arg-type]
append_symints=True,
)
remapped_static_indices = remap_unwrapped_subclass_arg_indices(
args, meta.static_input_indices
)
if is_joint_structure:
primals_unwrapped = args_unwrapped[0] # type: ignore[assignment]
primals_unwrapped_descs = args_descs_unwrapped[0] # type: ignore[assignment]
fn_to_trace = joint_fn # type: ignore[assignment]
else:
primals_unwrapped = args_unwrapped # type: ignore[assignment]
primals_unwrapped_descs = args_descs_unwrapped # type: ignore[assignment]
fn_to_trace = fw_fn # type: ignore[assignment]
# Note: [Partitioner handling for Subclasses, Part 1]
# The way the partitioner works is that:
# (1) we pass is a single graph containing the joint fw/bw,
# where the # of graph outputs corresponds to # fw_outputs + # grad_inputs
# (2) The partitioner accepts an arguments, num_fwd_outputs,
# and assumes that the first "num_fwd_outputs" graph outputs correspond
# to outputs of the forward graph.
# How do tensor subclasses enter the picture?
# the num_fwd_outputs in the final graph is actually non-trivial to compute,
# because it can be influenced by input mutations and intermediate bases.
# So we compute it by inspecting the current ViewAndMutationMeta object.
# However, the original ViewAndMutationMeta that we computed was created
# on the subclass -> subclass graph,
# which can have a different number of outputs than the dense -> dense graph.
# That's why we created a fresh metadata object on the dense -> dense function here,
# and plumb it back up to the partitioner.
# See Note: [Partitioner handling for Subclasses, Part 2] for more info.
meta_updated = run_functionalized_fw_and_collect_metadata(
without_output_descs(metadata_fn),
# pyrefly: ignore [bad-argument-type]
flat_args_descs=primals_unwrapped_descs,
static_input_indices=remapped_static_indices,
keep_input_mutations=meta.keep_input_mutations,
is_train=meta.is_train,
# pyrefly: ignore [not-iterable]
)(*primals_unwrapped)
subclass_meta.fw_metadata = meta_updated
return SubclassTracingInfo(
plain_tensor_trace_fn=fn_to_trace,
plain_tensor_args=args_unwrapped,
plain_tensor_args_descs=args_descs_unwrapped,
maybe_subclass_meta=subclass_meta,
)
def create_functional_call(
mod, params_spec, params_len, store_orig_mod=False, strict_out_tuple=True
):
# Redundant with dynamo, but worth having in case this gets invoked elsewhere.
# https://github.com/pytorch/pytorch/issues/103569
@simple_wraps(mod)
def functional_call(*args, **kwargs):
flat_params = args[:params_len]
if isinstance(params_spec, TreeSpec):
params = pytree.tree_unflatten(flat_params, params_spec)
else:
assert isinstance(params_spec, list)
params = dict(zip(params_spec, flat_params))
with (
stateless._reparametrize_module(mod, params),
maybe_disable_thunkify(),
):
if isinstance(mod, torch.fx.GraphModule):
if kwargs:
# Handle **kwargs. FX only natively supports positional
# arguments (through placeholders).
arg_list = list(args[params_len:])
arg_list.extend(list(kwargs.values()))
args = tuple(arg_list)
else:
args = args[params_len:]
with fx_traceback.preserve_node_meta(), warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Anomaly Detection has been enabled."
)
with torch.autograd.detect_anomaly(check_nan=False):
fake_mode = detect_fake_mode()
assert fake_mode is not None
fake_mode.epoch += 1
out = PropagateUnbackedSymInts(mod).run(*args)
else:
out = mod(*args[params_len:], **kwargs)
if strict_out_tuple and not isinstance(out, (tuple, list)):
raise RuntimeError(
"Graph output must be a (). This is so that we can avoid "
"pytree processing of the outputs. Please change the module to "
"have tuple outputs or use aot_module instead."
)
return out
# Note [Preserving the nn module stack metadata during export non-strict mode]
# This path is currently only used by the non-strict export flow,
# where we cannot rely on dynamo to preserve nn stack metadata in our captured graph.
# Instead, we stash the original user nn module here, and rely on `make_fx` to grab
# this stashed module and use it to track nn module stack metadata
if store_orig_mod and not hasattr(functional_call, "_orig_mod"):
functional_call._orig_mod = mod # type: ignore[attr-defined]
return functional_call
| MutationCounters |
python | Pylons__pyramid | tests/test_urldispatch.py | {
"start": 24979,
"end": 25013
} | class ____:
""" """
| DummyContext |
python | jina-ai__jina | jina/excepts.py | {
"start": 508,
"end": 622
} | class ____(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
| FlowTopologyError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar6.py | {
"start": 606,
"end": 2099
} | class ____(Generic[_T1]):
async def func1(self, a: _T1) -> _T1:
_ = a.var1
# This should generate an error.
_ = a.var2
_ = a(3)
# This should generate an error.
_ = a(3.3)
# This should generate an error.
_ = a[0]
# This should generate an error.
_ = a + 1
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate an error.
for _ in a:
pass
a.do_stuff()
# This should generate an error.
a.do_other_stuff()
_ = a.__class__
_ = a.__doc__
return a
async def func2(self, a: _T2) -> _T2:
_ = a.var1
# This should generate an error.
_ = a.var2
_ = a(3)
# This should generate an error.
_ = a(3.3)
# This should generate two errors.
_ = a[0]
# This should generate an error.
_ = a + 1
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate an error.
for _ in a:
pass
a.do_stuff()
# This should generate an error.
a.do_other_stuff()
_ = a.__class__
_ = a.__doc__
return a
| ClassA |
python | sphinx-doc__sphinx | sphinx/transforms/post_transforms/images.py | {
"start": 5833,
"end": 10845
} | class ____(BaseImageConverter):
"""A base class for image converters.
An image converter is kind of Docutils transform module. It is used to
convert image files which are not supported by a builder to the
appropriate format for that builder.
For example, :py:class:`LaTeX builder <.LaTeXBuilder>` supports PDF,
PNG and JPEG as image formats. However it does not support SVG images.
For such case, using image converters allows to embed these
unsupported images into the document. One of the image converters;
:ref:`sphinx.ext.imgconverter <sphinx.ext.imgconverter>` can convert
a SVG image to PNG format using Imagemagick internally.
There are three steps to make your custom image converter:
1. Make a subclass of ``ImageConverter`` class
2. Override ``conversion_rules``, ``is_available()`` and ``convert()``
3. Register your image converter to Sphinx using
:py:meth:`.Sphinx.add_post_transform`
"""
default_priority = 200
#: The converter is available or not. Will be filled at the first call of
#: the build. The result is shared in the same process.
#:
#: .. todo:: This should be refactored not to store the state without class
#: variable.
available: bool | None = None
#: A conversion rules the image converter supports.
#: It is represented as a list of pair of source image format (mimetype) and
#: destination one::
#:
#: conversion_rules = [
#: ('image/svg+xml', 'image/png'),
#: ('image/gif', 'image/png'),
#: ('application/pdf', 'image/png'),
#: ]
conversion_rules: list[tuple[str, str]] = []
def match(self, node: nodes.image) -> bool:
if not self.env._builder_cls.supported_image_types:
return False
if '?' in node['candidates']:
return False
node_mime_types = set(self.guess_mimetypes(node))
supported_image_types = set(self.env._builder_cls.supported_image_types)
if node_mime_types & supported_image_types:
# builder supports the image; no need to convert
return False
if self.available is None:
# store the value to the class variable to share it during the build
self.__class__.available = self.is_available()
if not self.available:
return False
else:
try:
self.get_conversion_rule(node)
except ValueError:
return False
else:
return True
def get_conversion_rule(self, node: nodes.image) -> tuple[str, str]:
for candidate in self.guess_mimetypes(node):
for supported in self.env._builder_cls.supported_image_types:
rule = (candidate, supported)
if rule in self.conversion_rules:
return rule
msg = 'No conversion rule found'
raise ValueError(msg)
def is_available(self) -> bool:
"""Return the image converter is available or not."""
raise NotImplementedError
def guess_mimetypes(self, node: nodes.image) -> list[str]:
# The special key ? is set for nonlocal URIs.
if '?' in node['candidates']:
return []
elif '*' in node['candidates']:
path = self.env.srcdir / node['uri']
guessed = guess_mimetype(path)
return [guessed] if guessed is not None else []
else:
return node['candidates'].keys()
def handle(self, node: nodes.image) -> None:
_from, _to = self.get_conversion_rule(node)
if _from in node['candidates']:
srcpath = node['candidates'][_from]
else:
srcpath = node['candidates']['*']
filename = self.env.images[srcpath][1]
filename = get_filename_for(filename, _to)
ensuredir(self.imagedir)
destpath = self.imagedir / filename
abs_srcpath = self.env.srcdir / srcpath
if self.convert(abs_srcpath, destpath):
if '*' in node['candidates']:
node['candidates']['*'] = str(destpath)
else:
node['candidates'][_to] = str(destpath)
node['uri'] = str(destpath)
self.env.original_image_uri[destpath] = srcpath
self.env.images.add_file(self.env.current_document.docname, destpath)
def convert(
self, _from: str | os.PathLike[str], _to: str | os.PathLike[str]
) -> bool:
"""Convert an image file to the expected format.
*_from* is a path of the source image file, and *_to* is a path
of the destination file.
"""
raise NotImplementedError
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_post_transform(ImageDownloader)
app.add_post_transform(DataURIExtractor)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| ImageConverter |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/chat_models.py | {
"start": 22155,
"end": 102564
} | class ____(BaseChatModel):
"""Anthropic (Claude) chat models.
See the [Claude Platform docs](https://platform.claude.com/docs/en/about-claude/models/overview)
for a list of the latest models, their capabilities, and pricing.
Setup:
Install `langchain-anthropic` and set environment variable `ANTHROPIC_API_KEY`.
```bash
pip install -U langchain-anthropic
export ANTHROPIC_API_KEY="your-api-key"
```
Key init args:
**Completion params:**
* [`model`][langchain_anthropic.chat_models.ChatAnthropic.model]: Name of
Anthropic model to use. e.g. `'claude-sonnet-4-5-20250929'`.
* [`temperature`][langchain_anthropic.chat_models.ChatAnthropic.temperature]:
Sampling temperature. Ranges from `0.0` to `1.0`.
* [`max_tokens`][langchain_anthropic.chat_models.ChatAnthropic.max_tokens]: Max
number of tokens to generate.
**Client params:**
* [`timeout`][langchain_anthropic.chat_models.ChatAnthropic.default_request_timeout]:
Timeout for requests.
* [`anthropic_proxy`][langchain_anthropic.chat_models.ChatAnthropic.anthropic_proxy]:
Proxy to use for the Anthropic clients, will be used for every API call.
If not passed in will be read from env var `ANTHROPIC_PROXY`.
* [`max_retries`][langchain_anthropic.chat_models.ChatAnthropic.max_retries]:
Max number of retries if a request fails.
* [`api_key`][langchain_anthropic.chat_models.ChatAnthropic.anthropic_api_key]:
Anthropic API key. If not passed in will be read from env var
`ANTHROPIC_API_KEY`.
* [`base_url`][langchain_anthropic.chat_models.ChatAnthropic.anthropic_api_url]:
Base URL for API requests. Only specify if using a proxy or service emulator.
See full list of supported init args and their descriptions below.
???+ example "Instantiate"
```python
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
temperature=0,
max_tokens=1024,
timeout=None,
max_retries=2,
# api_key="...",
# base_url="...",
# other params...
)
```
???+ note
Any param which is not explicitly supported will be passed directly to
`Anthropic.messages.create(...)` each time to the model is invoked.
!!! example
```python
from langchain_anthropic import ChatAnthropic
import anthropic
ChatAnthropic(..., extra_headers={}).invoke(...)
# Results in underlying API call of:
anthropic.Anthropic(..).messages.create(..., extra_headers={})
# ... which is also equivalent to:
ChatAnthropic(...).invoke(..., extra_headers={})
```
???+ example "Invoke"
```python
messages = [
(
"system",
"You are a helpful translator. Translate the user sentence to French.",
),
(
"human",
"I love programming.",
),
]
model.invoke(messages)
```
```python
AIMessage(
content="J'aime la programmation.",
response_metadata={
"id": "msg_01Trik66aiQ9Z1higrD5XFx3",
"model": "claude-sonnet-4-5-20250929",
"stop_reason": "end_turn",
"stop_sequence": None,
"usage": {"input_tokens": 25, "output_tokens": 11},
},
id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0",
usage_metadata={
"input_tokens": 25,
"output_tokens": 11,
"total_tokens": 36,
},
)
```
???+ example "Stream"
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
```python
AIMessageChunk(content="J", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content="'", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content="a", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content="ime", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content=" la", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content=" programm", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content="ation", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
AIMessageChunk(content=".", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
```
To aggregate the full message from the stream:
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
```python
AIMessageChunk(content="J'aime la programmation.", id="run-b34faef0-882f-4869-a19c-ed2b856e6361")
```
???+ example "Async invocation"
```python
await model.ainvoke(messages)
# stream:
# async for chunk in (await model.astream(messages))
# batch:
# await model.abatch([messages])
```
```python
AIMessage(
content="J'aime la programmation.",
response_metadata={
"id": "msg_01Trik66aiQ9Z1higrD5XFx3",
"model": "claude-sonnet-4-5-20250929",
"stop_reason": "end_turn",
"stop_sequence": None,
"usage": {"input_tokens": 25, "output_tokens": 11},
},
id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0",
usage_metadata={
"input_tokens": 25,
"output_tokens": 11,
"total_tokens": 36,
},
)
```
???+ example "Tool calling"
```python hl_lines="16"
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
```
```python
[
{
"name": "GetWeather",
"args": {"location": "Los Angeles, CA"},
"id": "toolu_01KzpPEAgzura7hpBqwHbWdo",
},
{
"name": "GetWeather",
"args": {"location": "New York, NY"},
"id": "toolu_01JtgbVGVJbiSwtZk3Uycezx",
},
{
"name": "GetPopulation",
"args": {"location": "Los Angeles, CA"},
"id": "toolu_01429aygngesudV9nTbCKGuw",
},
{
"name": "GetPopulation",
"args": {"location": "New York, NY"},
"id": "toolu_01JPktyd44tVMeBcPPnFSEJG",
},
]
```
See [`ChatAnthropic.bind_tools()`][langchain_anthropic.chat_models.ChatAnthropic.bind_tools]
for more info.
!!! note "Strict tool use"
Anthropic supports a strict tool use feature that guarantees tool names
and arguments are validated and correctly typed.
See [`ChatAnthropic.bind_tools()`][langchain_anthropic.chat_models.ChatAnthropic.bind_tools]
for more info.
???+ example "Token-efficient tool use (beta)"
See LangChain [docs](https://docs.langchain.com/oss/python/integrations/chat/anthropic#token-efficient-tool-use)
for more detail.
```python hl_lines="9"
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
temperature=0,
model_kwargs={
"extra_headers": {
"anthropic-beta": "token-efficient-tools-2025-02-19"
}
}
)
@tool
def get_weather(location: str) -> str:
\"\"\"Get the weather at a location.\"\"\"
return "It's sunny."
model_with_tools = model.bind_tools([get_weather])
response = model_with_tools.invoke(
"What's the weather in San Francisco?"
)
print(response.tool_calls)
print(f'Total tokens: {response.usage_metadata["total_tokens"]}')
```
```txt
[{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'toolu_01HLjQMSb1nWmgevQUtEyz17', 'type': 'tool_call'}]
Total tokens: 408
```
???+ example "Image input"
See the [multimodal guide](https://docs.langchain.com/oss/python/langchain/models#multimodal)
for more detail.
```python
import base64
import httpx
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
message = HumanMessage(
content=[
{
"type": "text",
"text": "Can you highlight the differences between these two images?",
},
{
"type": "image",
"base64": image_data,
"mime_type": "image/jpeg",
},
{
"type": "image",
"url": image_url,
},
],
)
ai_msg = model.invoke([message])
ai_msg.content
```
```python
"After examining both images carefully, I can see that they are actually identical."
```
??? example "Upload with Files API"
You can also pass in files that are managed through Anthropic's
[Files API](https://platform.claude.com/docs/en/build-with-claude/files):
```python
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["files-api-2025-04-14"],
)
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": "Describe this document.",
},
{
"type": "image",
"id": "file_abc123...",
},
],
}
model.invoke([input_message])
```
???+ example "PDF input"
See the [multimodal guide](https://docs.langchain.com/oss/python/langchain/models#multimodal)
for more detail.
```python
from base64 import b64encode
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage
import requests
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
data = b64encode(requests.get(url).content).decode()
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
ai_msg = model.invoke(
[
HumanMessage(
[
"Summarize this document.",
{
"type": "file",
"mime_type": "application/pdf",
"base64": data,
},
]
)
]
)
ai_msg.content
```
```python
"This appears to be a simple document..."
```
??? example "Upload with Files API"
You can also pass in files that are managed through Anthropic's
[Files API](https://platform.claude.com/docs/en/build-with-claude/files):
```python
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["files-api-2025-04-14"],
)
input_message = {
"role": "user",
"content": [
{
"type": "text",
"text": "Describe this document.",
},
{
"type": "file",
"id": "file_abc123...",
},
],
}
model.invoke([input_message])
```
???+ example "Extended thinking"
Certain [Claude models](https://platform.claude.com/docs/en/build-with-claude/extended-thinking#supported-models)
support an [extended thinking](https://platform.claude.com/docs/en/build-with-claude/extended-thinking)
feature, which will output the step-by-step reasoning process that led to its
final answer.
To use it, specify the `thinking` parameter when initializing `ChatAnthropic`.
It can also be passed in as a kwarg during invocation.
**You will need to specify a token budget** to use this feature.
!!! example
```python hl_lines="5-6"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
max_tokens=5000,
thinking={"type": "enabled", "budget_tokens": 2000},
)
response = model.invoke("What is the cube root of 50.653?")
response.content
```
```python
[
{
"signature": "...",
"thinking": "To find the cube root of 50.653...",
"type": "thinking",
},
{"text": "The cube root of 50.653 is ...", "type": "text"},
]
```
!!! warning "Differences in thinking across model versions"
The Claude Messages API handles thinking differently across Claude Sonnet
3.7 and Claude 4 models.
Refer to the [Claude docs](https://platform.claude.com/docs/en/build-with-claude/extended-thinking#differences-in-thinking-across-model-versions)
for more info.
???+ example "Prompt caching"
Prompt caching reduces processing time and costs for repetitive tasks or prompts
with consistent elements
!!! note
Only certain models support prompt caching.
See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#supported-models)
for a full list.
```python hl_lines="16"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
messages = [
{
"role": "system",
"content": [
{
"type": "text",
"text": "Below is some long context:",
},
{
"type": "text",
"text": f"{long_text}",
"cache_control": {"type": "ephemeral"},
},
],
},
{
"role": "user",
"content": "What's that about?",
},
]
response = model.invoke(messages)
response.usage_metadata["input_token_details"]
```
```python
{"cache_read": 0, "cache_creation": 1458}
```
Alternatively, you may enable prompt caching at invocation time. You may want to
conditionally cache based on runtime conditions, such as the length of the
context. This is useful for app-level decisions about what to
cache.
```python hl_lines="3"
response = model.invoke(
messages,
cache_control={"type": "ephemeral"},
)
```
??? example "Extended caching"
The cache lifetime is 5 minutes by default. If this is too short, you can
apply one hour caching by setting `ttl` to `'1h'`.
```python hl_lines="12"
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
)
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": f"{long_text}",
"cache_control": {"type": "ephemeral", "ttl": "1h"},
},
],
}
]
response = model.invoke(messages)
```
Details of cached token counts will be included on the `InputTokenDetails`
of response's `usage_metadata`:
```python
response = model.invoke(messages)
response.usage_metadata
```
```python
{
"input_tokens": 1500,
"output_tokens": 200,
"total_tokens": 1700,
"input_token_details": {
"cache_read": 0,
"cache_creation": 1000,
"ephemeral_1h_input_tokens": 750,
"ephemeral_5m_input_tokens": 250,
},
}
```
See [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#1-hour-cache-duration-beta)
for detail.
???+ example "Token usage metadata"
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```
```python
{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
```
Message chunks containing token usage will be included during streaming by
default:
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full.usage_metadata
```
```python
{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
```
These can be disabled by setting [`stream_usage=False`][langchain_anthropic.chat_models.ChatAnthropic.stream_usage]
in the stream method or when initializing `ChatAnthropic`.
???+ example "Citations"
Anthropic supports a [citations](https://platform.claude.com/docs/en/build-with-claude/citations)
feature that lets Claude attach context to its answers based on source
documents supplied by the user.
When passing a [Claude document content block](https://platform.claude.com/docs/en/build-with-claude/citations#document-types)
with `#!json "citations": {"enabled": True}` included in the query, Claude may
generate citations in its response.
```python hl_lines="9-19"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model="claude-3-5-haiku-20241022")
messages = [
{
"role": "user",
"content": [
{
"type": "document",
"source": {
"type": "text",
"media_type": "text/plain",
"data": "The grass is green. The sky is blue.",
},
"title": "My Document",
"context": "This is a trustworthy document.",
"citations": {"enabled": True},
},
{"type": "text", "text": "What color is the grass and sky?"},
],
}
]
response = model.invoke(messages)
response.content
```
```python hl_lines="6-15 21-30"
[
{"text": "Based on the document, ", "type": "text"},
{
"text": "the grass is green",
"type": "text",
"citations": [
{
"type": "char_location",
"cited_text": "The grass is green. ",
"document_index": 0,
"document_title": "My Document",
"start_char_index": 0,
"end_char_index": 20,
}
],
},
{"text": ", and ", "type": "text"},
{
"text": "the sky is blue",
"type": "text",
"citations": [
{
"type": "char_location",
"cited_text": "The sky is blue.",
"document_index": 0,
"document_title": "My Document",
"start_char_index": 20,
"end_char_index": 36,
}
],
},
{"text": ".", "type": "text"},
]
```
???+ example "Context management"
Anthropic supports a context editing feature that will automatically manage the
model's context window (e.g., by clearing tool results).
See [Anthropic documentation](https://platform.claude.com/docs/en/build-with-claude/context-editing)
for details and configuration options.
```python hl_lines="5-6"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["context-management-2025-06-27"],
context_management={"edits": [{"type": "clear_tool_uses_20250919"}]},
)
model_with_tools = model.bind_tools([{"type": "web_search_20250305", "name": "web_search"}])
response = model_with_tools.invoke("Search for recent developments in AI")
```
???+ example "Response metadata"
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```python
{
"id": "msg_013xU6FHEGEq76aP4RgFerVT",
"model": "claude-sonnet-4-5-20250929",
"stop_reason": "end_turn",
"stop_sequence": None,
"usage": {"input_tokens": 25, "output_tokens": 11},
}
```
???+ example "Extended context windows (beta)"
Claude Sonnet 4 supports a 1-million token context window, available in beta for
organizations in usage tier 4 and organizations with custom rate limits.
```python hl_lines="5"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["context-1m-2025-08-07"], # Enable 1M context beta
)
long_document = \"\"\"
This is a very long document that would benefit from the extended 1M
context window...
[imagine this continues for hundreds of thousands of tokens]
\"\"\"
messages = [
HumanMessage(f\"\"\"
Please analyze this document and provide a summary:
{long_document}
What are the key themes and main conclusions?
\"\"\")
]
response = model.invoke(messages)
```
See [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/context-windows#1m-token-context-window)
for detail.
???+ example "Structured output"
```python hl_lines="13"
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
Joke(
setup="Why was the cat sitting on the computer?",
punchline="To keep an eye on the mouse!",
rating=None,
)
```
See [`ChatAnthropic.with_structured_output()`][langchain_anthropic.chat_models.ChatAnthropic.with_structured_output]
for more info.
!!! note "Native structured output"
Anthropic supports a native structured output feature that guarantees
responses adhere to a given schema.
See [`ChatAnthropic.with_structured_output()`][langchain_anthropic.chat_models.ChatAnthropic.with_structured_output]
for more info.
???+ example "Built-in tools"
See LangChain [docs](https://docs.langchain.com/oss/python/integrations/chat/anthropic#built-in-tools)
for more detail.
??? example "Web search"
```python hl_lines="5-9"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model="claude-3-5-haiku-20241022")
tool = {
"type": "web_search_20250305",
"name": "web_search",
"max_uses": 3,
}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?")
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-search-tool)
for more info.
??? example "Web fetch (beta)"
```python hl_lines="5 8-12"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-3-5-haiku-20241022",
betas=["web-fetch-2025-09-10"], # Enable web fetch beta
)
tool = {
"type": "web_fetch_20250910",
"name": "web_fetch",
"max_uses": 3,
}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke("Please analyze the content at https://example.com/article")
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-fetch-tool)
for more info.
??? example "Code execution"
```python hl_lines="3 6-9"
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["code-execution-2025-05-22"], # Enable code execution beta
)
tool = {
"type": "code_execution_20250522",
"name": "code_execution",
}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke(
"Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
)
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool)
for more info.
??? example "Memory tool"
```python hl_lines="5 8-11"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["context-management-2025-06-27"], # Enable context management beta
)
tool = {
"type": "memory_20250818",
"name": "memory",
}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke("What are my interests?")
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool)
for more info.
??? example "Remote MCP"
```python hl_lines="3-14 18-19"
from langchain_anthropic import ChatAnthropic
mcp_servers = [
{
"type": "url",
"url": "https://mcp.deepwiki.com/mcp",
"name": "deepwiki",
"tool_configuration": { # optional configuration
"enabled": True,
"allowed_tools": ["ask_question"],
},
"authorization_token": "PLACEHOLDER", # optional authorization
}
]
model = ChatAnthropic(
model="claude-sonnet-4-5-20250929",
betas=["mcp-client-2025-04-04"], # Enable MCP client beta
mcp_servers=mcp_servers, # Pass in MCP server configurations
)
response = model.invoke(
"What transport protocols does the 2025-03-26 version of the MCP "
"spec (modelcontextprotocol/modelcontextprotocol) support?"
)
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/mcp-connector)
for more info.
??? example "Text editor"
```python hl_lines="5-8"
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
tool = {
"type": "text_editor_20250124",
"name": "str_replace_editor",
}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke(
"There's a syntax error in my primes.py file. Can you help me fix it?"
)
print(response.text)
response.tool_calls
```
```txt
I'd be happy to help you fix the syntax error in your primes.py file. First, let's look at the current content of the file to identify the error.
```
```txt
[{'name': 'str_replace_editor',
'args': {'command': 'view', 'path': '/repo/primes.py'},
'id': 'toolu_01VdNgt1YV7kGfj9LFLm6HyQ',
'type': 'tool_call'}]
```
See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool)
for more info.
""" # noqa: E501
model_config = ConfigDict(
populate_by_name=True,
)
model: str = Field(alias="model_name")
"""Model name to use."""
max_tokens: int | None = Field(default=None, alias="max_tokens_to_sample")
"""Denotes the number of tokens to predict per generation."""
temperature: float | None = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int | None = None
"""Number of most likely tokens to consider at each step."""
top_p: float | None = None
"""Total probability mass of tokens to consider at each step."""
default_request_timeout: float | None = Field(None, alias="timeout")
"""Timeout for requests to Claude API."""
# sdk default = 2: https://github.com/anthropics/anthropic-sdk-python?tab=readme-ov-file#retries
max_retries: int = 2
"""Number of retries allowed for requests sent to the Claude API."""
stop_sequences: list[str] | None = Field(None, alias="stop")
"""Default stop sequences."""
anthropic_api_url: str | None = Field(
alias="base_url",
default_factory=from_env(
["ANTHROPIC_API_URL", "ANTHROPIC_BASE_URL"],
default="https://api.anthropic.com",
),
)
"""Base URL for API requests. Only specify if using a proxy or service emulator.
If a value isn't passed in, will attempt to read the value first from
`ANTHROPIC_API_URL` and if that is not set, `ANTHROPIC_BASE_URL`.
"""
anthropic_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""),
)
"""Automatically read from env var `ANTHROPIC_API_KEY` if not provided."""
anthropic_proxy: str | None = Field(
default_factory=from_env("ANTHROPIC_PROXY", default=None)
)
"""Proxy to use for the Anthropic clients, will be used for every API call.
If not provided, will attempt to read from the `ANTHROPIC_PROXY` environment
variable.
"""
default_headers: Mapping[str, str] | None = None
"""Headers to pass to the Anthropic clients, will be used for every API call."""
betas: list[str] | None = None
"""List of beta features to enable. If specified, invocations will be routed
through `client.beta.messages.create`.
Example: `#!python betas=["mcp-client-2025-04-04"]`
"""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
streaming: bool = False
"""Whether to use streaming or not."""
stream_usage: bool = True
"""Whether to include usage metadata in streaming output.
If `True`, additional message chunks will be generated during the stream including
usage metadata.
"""
thinking: dict[str, Any] | None = Field(default=None)
"""Parameters for Claude reasoning,
e.g., `#!python {"type": "enabled", "budget_tokens": 10_000}`
"""
mcp_servers: list[dict[str, Any]] | None = None
"""List of MCP servers to use for the request.
Example: `#!python mcp_servers=[{"type": "url", "url": "https://mcp.example.com/mcp",
"name": "example-mcp"}]`
!!! note
This feature requires the beta header `'mcp-client-2025-11-20'` to be set in
[`betas`][langchain_anthropic.chat_models.ChatAnthropic.betas].
"""
context_management: dict[str, Any] | None = None
"""Configuration for
[context management](https://platform.claude.com/docs/en/build-with-claude/context-editing).
!!! note
This feature requires the beta header `'context-management-2025-06-27'` to be
set in [`betas`][langchain_anthropic.chat_models.ChatAnthropic.betas].
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
@property
def lc_secrets(self) -> dict[str, str]:
"""Return a mapping of secret keys to environment variables."""
return {
"anthropic_api_key": "ANTHROPIC_API_KEY",
"mcp_servers": "ANTHROPIC_MCP_SERVERS",
}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Whether the class is serializable in langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "chat_models", "anthropic"]`
"""
return ["langchain", "chat_models", "anthropic"]
@property
def _identifying_params(self) -> dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"model_kwargs": self.model_kwargs,
"streaming": self.streaming,
"max_retries": self.max_retries,
"default_request_timeout": self.default_request_timeout,
"thinking": self.thinking,
}
def _get_ls_params(
self,
stop: list[str] | None = None,
**kwargs: Any,
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="anthropic",
ls_model_name=params.get("model", self.model),
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None):
ls_params["ls_stop"] = ls_stop
return ls_params
@model_validator(mode="before")
@classmethod
def set_default_max_tokens(cls, values: dict[str, Any]) -> Any:
"""Set default `max_tokens`."""
if values.get("max_tokens") is None:
model = values.get("model") or values.get("model_name")
values["max_tokens"] = _default_max_tokens_for(model)
return values
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict) -> Any:
"""Build model kwargs."""
all_required_field_names = get_pydantic_field_names(cls)
return _build_model_kwargs(values, all_required_field_names)
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
self.profile = _get_default_model_profile(self.model)
return self
@cached_property
def _client_params(self) -> dict[str, Any]:
client_params: dict[str, Any] = {
"api_key": self.anthropic_api_key.get_secret_value(),
"base_url": self.anthropic_api_url,
"max_retries": self.max_retries,
"default_headers": (self.default_headers or None),
}
# value <= 0 indicates the param should be ignored. None is a meaningful value
# for Anthropic client and treated differently than not specifying the param at
# all.
if self.default_request_timeout is None or self.default_request_timeout > 0:
client_params["timeout"] = self.default_request_timeout
return client_params
@cached_property
def _client(self) -> anthropic.Client:
client_params = self._client_params
http_client_params = {"base_url": client_params["base_url"]}
if "timeout" in client_params:
http_client_params["timeout"] = client_params["timeout"]
if self.anthropic_proxy:
http_client_params["anthropic_proxy"] = self.anthropic_proxy
http_client = _get_default_httpx_client(**http_client_params)
params = {
**client_params,
"http_client": http_client,
}
return anthropic.Client(**params)
@cached_property
def _async_client(self) -> anthropic.AsyncClient:
client_params = self._client_params
http_client_params = {"base_url": client_params["base_url"]}
if "timeout" in client_params:
http_client_params["timeout"] = client_params["timeout"]
if self.anthropic_proxy:
http_client_params["anthropic_proxy"] = self.anthropic_proxy
http_client = _get_default_async_httpx_client(**http_client_params)
params = {
**client_params,
"http_client": http_client,
}
return anthropic.AsyncClient(**params)
    def _get_request_payload(
        self,
        input_: LanguageModelInput,
        *,
        stop: list[str] | None = None,
        **kwargs: dict,
    ) -> dict:
        """Get the request payload for the Anthropic API.

        Converts the LangChain input into Anthropic message format, applies an
        optional ``cache_control`` annotation to the last message, and merges
        instance settings, ``model_kwargs``, and call-time kwargs into a single
        request body. ``None``-valued entries are stripped before returning.
        """
        messages = self._convert_input(input_).to_messages()
        for idx, message in enumerate(messages):
            # Translate v1 content
            if (
                isinstance(message, AIMessage)
                and message.response_metadata.get("output_version") == "v1"
            ):
                # Rebuild tool calls as typed dicts so the v1 -> Anthropic
                # content converter can pair them with the content blocks.
                tcs: list[types.ToolCall] = [
                    {
                        "type": "tool_call",
                        "name": tool_call["name"],
                        "args": tool_call["args"],
                        "id": tool_call.get("id"),
                    }
                    for tool_call in message.tool_calls
                ]
                messages[idx] = message.model_copy(
                    update={
                        "content": _convert_from_v1_to_anthropic(
                            cast(list[types.ContentBlock], message.content),
                            tcs,
                            message.response_metadata.get("model_provider"),
                        )
                    }
                )
        system, formatted_messages = _format_messages(messages)
        # If cache_control is provided in kwargs, add it to last message
        # and content block.
        if "cache_control" in kwargs and formatted_messages:
            if isinstance(formatted_messages[-1]["content"], list):
                formatted_messages[-1]["content"][-1]["cache_control"] = kwargs.pop(
                    "cache_control"
                )
            elif isinstance(formatted_messages[-1]["content"], str):
                # Promote plain-string content to a single text block so the
                # cache_control annotation has a block to attach to.
                formatted_messages[-1]["content"] = [
                    {
                        "type": "text",
                        "text": formatted_messages[-1]["content"],
                        "cache_control": kwargs.pop("cache_control"),
                    }
                ]
            else:
                pass
        # If cache_control remains in kwargs, it would be passed as a top-level param
        # to the API, but Anthropic expects it nested within a message
        _ = kwargs.pop("cache_control", None)
        payload = {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "messages": formatted_messages,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "stop_sequences": stop or self.stop_sequences,
            "betas": self.betas,
            "context_management": self.context_management,
            "mcp_servers": self.mcp_servers,
            "system": system,
            **self.model_kwargs,
            **kwargs,
        }
        if self.thinking is not None:
            payload["thinking"] = self.thinking
        if "response_format" in payload:
            response_format = payload.pop("response_format")
            if (
                isinstance(response_format, dict)
                and response_format.get("type") == "json_schema"
                and "schema" in response_format.get("json_schema", {})
            ):
                # compat with langchain.agents.create_agent response_format, which is
                # an approximation of OpenAI format
                response_format = cast(dict, response_format["json_schema"]["schema"])
            payload["output_format"] = _convert_to_anthropic_output_format(
                response_format
            )
        # Structured output requires its beta header; add it only when the
        # caller supplied no betas of their own.
        if "output_format" in payload and not payload["betas"]:
            payload["betas"] = ["structured-outputs-2025-11-13"]
        # Drop None values: the SDK treats explicit None differently from an
        # omitted parameter.
        return {k: v for k, v in payload.items() if v is not None}
def _create(self, payload: dict) -> Any:
if "betas" in payload:
return self._client.beta.messages.create(**payload)
return self._client.messages.create(**payload)
async def _acreate(self, payload: dict) -> Any:
if "betas" in payload:
return await self._async_client.beta.messages.create(**payload)
return await self._async_client.messages.create(**payload)
    def _stream(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        *,
        stream_usage: bool | None = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream chat completion chunks from the Anthropic Messages API.

        Args:
            messages: Conversation history to send to the model.
            stop: Optional stop sequences overriding the instance default.
            run_manager: Callback manager notified for each new text token.
            stream_usage: Whether to emit usage-metadata chunks; falls back to
                the instance-level `stream_usage` setting when `None`.
            kwargs: Additional parameters merged into the request payload.

        Yields:
            `ChatGenerationChunk` objects as streaming events arrive.
        """
        if stream_usage is None:
            stream_usage = self.stream_usage
        kwargs["stream"] = True
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        try:
            stream = self._create(payload)
            # Content is coerced to a plain string only when the request uses
            # no tools, documents, or thinking params.
            coerce_content_to_string = (
                not _tools_in_params(payload)
                and not _documents_in_params(payload)
                and not _thinking_in_params(payload)
            )
            block_start_event = None
            for event in stream:
                # block_start_event is threaded between iterations so delta
                # events can be matched to the content block they extend.
                msg, block_start_event = _make_message_chunk_from_anthropic_event(
                    event,
                    stream_usage=stream_usage,
                    coerce_content_to_string=coerce_content_to_string,
                    block_start_event=block_start_event,
                )
                if msg is not None:
                    chunk = ChatGenerationChunk(message=msg)
                    # Only plain-text deltas are reported as "new tokens".
                    if run_manager and isinstance(msg.content, str):
                        run_manager.on_llm_new_token(msg.content, chunk=chunk)
                    yield chunk
        except anthropic.BadRequestError as e:
            _handle_anthropic_bad_request(e)
    async def _astream(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: AsyncCallbackManagerForLLMRun | None = None,
        *,
        stream_usage: bool | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Asynchronously stream chat completion chunks from the Messages API.

        Args:
            messages: Conversation history to send to the model.
            stop: Optional stop sequences overriding the instance default.
            run_manager: Async callback manager notified for each new text token.
            stream_usage: Whether to emit usage-metadata chunks; falls back to
                the instance-level `stream_usage` setting when `None`.
            kwargs: Additional parameters merged into the request payload.

        Yields:
            `ChatGenerationChunk` objects as streaming events arrive.
        """
        if stream_usage is None:
            stream_usage = self.stream_usage
        kwargs["stream"] = True
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        try:
            stream = await self._acreate(payload)
            # Content is coerced to a plain string only when the request uses
            # no tools, documents, or thinking params.
            coerce_content_to_string = (
                not _tools_in_params(payload)
                and not _documents_in_params(payload)
                and not _thinking_in_params(payload)
            )
            block_start_event = None
            async for event in stream:
                # block_start_event is threaded between iterations so delta
                # events can be matched to the content block they extend.
                msg, block_start_event = _make_message_chunk_from_anthropic_event(
                    event,
                    stream_usage=stream_usage,
                    coerce_content_to_string=coerce_content_to_string,
                    block_start_event=block_start_event,
                )
                if msg is not None:
                    chunk = ChatGenerationChunk(message=msg)
                    # Only plain-text deltas are reported as "new tokens".
                    if run_manager and isinstance(msg.content, str):
                        await run_manager.on_llm_new_token(msg.content, chunk=chunk)
                    yield chunk
        except anthropic.BadRequestError as e:
            _handle_anthropic_bad_request(e)
    def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
        """Format the output from the Anthropic API to LC.

        Args:
            data: Raw SDK response object (must support `model_dump()` and
                expose a `usage` attribute).
            kwargs: Unused; accepted for call-signature compatibility.

        Returns:
            A `ChatResult` whose single generation wraps the converted
            `AIMessage`; everything besides content/role/type goes into
            `llm_output`.
        """
        data_dict = data.model_dump()
        content = data_dict["content"]
        # Remove citations if they are None - introduced in anthropic sdk 0.45
        for block in content:
            if (
                isinstance(block, dict)
                and "citations" in block
                and block["citations"] is None
            ):
                block.pop("citations")
            # Thinking blocks may carry a null "text" field; drop it.
            if (
                isinstance(block, dict)
                and block.get("type") == "thinking"
                and "text" in block
                and block["text"] is None
            ):
                block.pop("text")
        llm_output = {
            k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
        }
        response_metadata = {"model_provider": "anthropic"}
        # Mirror "model" under "model_name" for cross-provider consistency.
        if "model" in llm_output and "model_name" not in llm_output:
            llm_output["model_name"] = llm_output["model"]
        # A single citation-free text block collapses to plain string content.
        if (
            len(content) == 1
            and content[0]["type"] == "text"
            and not content[0].get("citations")
        ):
            msg = AIMessage(
                content=content[0]["text"], response_metadata=response_metadata
            )
        elif any(block["type"] == "tool_use" for block in content):
            # Tool use present: keep structured content and surface tool calls.
            tool_calls = extract_tool_calls(content)
            msg = AIMessage(
                content=content,
                tool_calls=tool_calls,
                response_metadata=response_metadata,
            )
        else:
            msg = AIMessage(content=content, response_metadata=response_metadata)
        msg.usage_metadata = _create_usage_metadata(data.usage)
        return ChatResult(
            generations=[ChatGeneration(message=msg)],
            llm_output=llm_output,
        )
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
payload = self._get_request_payload(messages, stop=stop, **kwargs)
try:
data = self._create(payload)
except anthropic.BadRequestError as e:
_handle_anthropic_bad_request(e)
return self._format_output(data, **kwargs)
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
payload = self._get_request_payload(messages, stop=stop, **kwargs)
try:
data = await self._acreate(payload)
except anthropic.BadRequestError as e:
_handle_anthropic_bad_request(e)
return self._format_output(data, **kwargs)
    def _get_llm_for_structured_output_when_thinking_is_enabled(
        self,
        schema: dict | type,
        formatted_tool: AnthropicTool,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Build a structured-output runnable usable while `thinking` is enabled.

        Forced tool calling is not supported alongside `thinking`, so this
        binds the schema as a regular (non-forced) tool, warns the caller, and
        pipes in a step that raises `OutputParserException` if the model did
        not actually call the tool.
        """
        thinking_admonition = (
            "Anthropic structured output relies on forced tool calling, "
            "which is not supported when `thinking` is enabled. This method will raise "
            "langchain_core.exceptions.OutputParserException if tool calls are not "
            "generated. Consider disabling `thinking` or adjust your prompt to ensure "
            "the tool is called."
        )
        warnings.warn(thinking_admonition, stacklevel=2)
        llm = self.bind_tools(
            [schema],
            ls_structured_output_format={
                "kwargs": {"method": "function_calling"},
                "schema": formatted_tool,
            },
        )

        def _raise_if_no_tool_calls(message: AIMessage) -> AIMessage:
            # Guard step: structured output is only meaningful if the tool was
            # actually invoked by the model.
            if not message.tool_calls:
                raise OutputParserException(thinking_admonition)
            return message

        return llm | _raise_if_no_tool_calls
    def bind_tools(
        self,
        tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
        *,
        tool_choice: dict[str, str] | str | None = None,
        parallel_tool_calls: bool | None = None,
        strict: bool | None = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, AIMessage]:
        r"""Bind tool-like objects to this chat model.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports Anthropic format tool schemas and any tool definition
                handled by
                `langchain_core.utils.function_calling.convert_to_openai_tool`.
                Built-in (server-side) Anthropic tools such as web search, code
                execution, text editor, and memory are passed through unchanged.
            tool_choice: Which tool to require the model to call. Options are:

                - Name of the tool as a string or as dict
                  `{"type": "tool", "name": "<<tool_name>>"}`: calls the
                  corresponding tool.
                - `'auto'`, `{"type": "auto"}`, or `None`: the model selects a
                  tool automatically (including no tool).
                - `'any'` or `{"type": "any"}`: force at least one tool call.
            parallel_tool_calls: Set to `False` to disable parallel tool use.
                Defaults to `None` (no specification, which allows parallel
                tool use).

                !!! version-added "Added in `langchain-anthropic` 0.3.2"
            strict: If `True`, Claude's schema adherence is applied to tool
                calls. Requires the `structured-outputs-2025-11-13` beta header
                to be set in `betas`. See the
                [Claude docs](https://platform.claude.com/docs/en/build-with-claude/structured-outputs#when-to-use-json-outputs-vs-strict-tool-use).
            kwargs: Any additional parameters are passed directly to `bind`.

        Returns:
            A `Runnable` that invokes this model with the tools attached to
            every request.

        ???+ example

            ```python
            from langchain_anthropic import ChatAnthropic
            from pydantic import BaseModel, Field


            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


            model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
            model_with_tools = model.bind_tools([GetWeather])
            model_with_tools.invoke("What is the weather like in San Francisco")
            ```

            To force a tool call, pass `tool_choice="any"` or the name of a
            specific tool, e.g. `tool_choice="GetWeather"`.

            To cache a tool definition, convert it first with
            `convert_to_anthropic_tool` and set
            `tool["cache_control"] = {"type": "ephemeral"}` before binding.

        See the LangChain
        [docs](https://docs.langchain.com/oss/python/integrations/chat/anthropic#built-in-tools)
        for built-in tools and strict tool use examples.
        """  # noqa: E501
        # Built-in (server-side) tools pass through untouched; everything else
        # is converted to Anthropic's tool schema.
        formatted_tools = [
            tool
            if _is_builtin_tool(tool)
            else convert_to_anthropic_tool(tool, strict=strict)
            for tool in tools
        ]
        if not tool_choice:
            pass
        elif isinstance(tool_choice, dict):
            # Assumed to already be in Anthropic's tool_choice format.
            kwargs["tool_choice"] = tool_choice
        elif isinstance(tool_choice, str) and tool_choice in ("any", "auto"):
            kwargs["tool_choice"] = {"type": tool_choice}
        elif isinstance(tool_choice, str):
            # Any other string is interpreted as the name of a specific tool.
            kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
        else:
            msg = (
                f"Unrecognized 'tool_choice' type {tool_choice=}. Expected dict, "
                f"str, or None."
            )
            raise ValueError(
                msg,
            )
        if parallel_tool_calls is not None:
            disable_parallel_tool_use = not parallel_tool_calls
            if "tool_choice" in kwargs:
                kwargs["tool_choice"]["disable_parallel_tool_use"] = (
                    disable_parallel_tool_use
                )
            else:
                # No explicit tool_choice: default to "auto" so the flag has a
                # tool_choice object to attach to.
                kwargs["tool_choice"] = {
                    "type": "auto",
                    "disable_parallel_tool_use": disable_parallel_tool_use,
                }
        return self.bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: dict | type,
*,
include_raw: bool = False,
method: Literal["function_calling", "json_schema"] = "function_calling",
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An Anthropic tool schema,
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
method: The structured output method to use. Options are:
- `'function_calling'` (default): Use forced tool calling to get
structured output.
- `'json_schema'`: Use Claude's dedicated
[structured output](https://platform.claude.com/docs/en/build-with-claude/structured-outputs)
feature.
kwargs: Additional keyword arguments are ignored.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`.
If `include_raw` is `False` and `schema` is a Pydantic class, `Runnable`
outputs an instance of `schema` (i.e., a Pydantic object). Otherwise, if
`include_raw` is `False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
??? example "Pydantic schema (`include_raw=False`)"
```python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
??? example "Pydantic schema (`include_raw=True`)"
```python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification, include_raw=True)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
??? example "Dictionary schema (`include_raw=False`)"
```python
from langchain_anthropic import ChatAnthropic
schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"input_schema": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {"type": "string"},
},
"required": ["answer", "justification"],
},
}
model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
structured_model = model.with_structured_output(schema)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
??? example "Native structured output with `method='json_schema'`"
Anthropic supports a native structured output feature that guarantees
responses adhere to a given schema.
!!! note
Native structured output requires:
- Claude Sonnet 4.5 or Opus 4.1
- `langchain-anthropic>=1.1.0`
To enable native structured output:
1. Specify the `structured-outputs-2025-11-13` beta header
2. Specify `method="json_schema"` when calling `with_structured_output`
```python hl_lines="6 16"
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel, Field
model = ChatAnthropic(
model="claude-sonnet-4-5",
betas=["structured-outputs-2025-11-13"],
)
class Movie(BaseModel):
\"\"\"A movie with details.\"\"\"
title: str = Field(..., description="The title of the movie")
year: int = Field(..., description="The year the movie was released")
director: str = Field(..., description="The director of the movie")
rating: float = Field(..., description="The movie's rating out of 10")
model_with_structure = model.with_structured_output(Movie, method="json_schema")
response = model_with_structure.invoke("Provide details about the movie Inception")
print(response)
# -> Movie(title="Inception", year=2010, director="Christopher Nolan", rating=8.8)
```
""" # noqa: E501
if method == "json_mode":
warning_message = (
"Unrecognized structured output method 'json_mode'. Defaulting to "
"'json_schema' method."
)
warnings.warn(warning_message, stacklevel=2)
method = "json_schema"
if method == "function_calling":
formatted_tool = convert_to_anthropic_tool(schema)
tool_name = formatted_tool["name"]
if self.thinking is not None and self.thinking.get("type") == "enabled":
llm = self._get_llm_for_structured_output_when_thinking_is_enabled(
schema,
formatted_tool,
)
else:
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema],
first_tool_only=True,
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name,
first_tool_only=True,
)
elif method == "json_schema":
llm = self.bind(
output_format=_convert_to_anthropic_output_format(schema),
ls_structured_output_format={
"kwargs": {"method": "json_schema"},
"schema": convert_to_openai_tool(schema),
},
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser = PydanticOutputParser(pydantic_object=schema)
else:
output_parser = JsonOutputParser()
else:
error_message = (
f"Unrecognized structured output method '{method}'. "
f"Expected 'function_calling' or 'json_schema'."
)
raise ValueError(error_message)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser,
parsing_error=lambda _: None,
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none],
exception_key="parsing_error",
)
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Sequence[dict[str, Any] | type | Callable | BaseTool] | None = None,
**kwargs: Any,
) -> int:
"""Count tokens in a sequence of input messages.
Args:
messages: The message inputs to tokenize.
tools: If provided, sequence of `dict`, `BaseModel`, function, or `BaseTool`
objects to be converted to tool schemas.
kwargs: Additional keyword arguments are passed to the Anthropic
`messages.count_tokens` method.
???+ example "Basic usage"
```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage, SystemMessage
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
messages = [
SystemMessage(content="You are a scientist"),
HumanMessage(content="Hello, Claude"),
]
model.get_num_tokens_from_messages(messages)
```
```txt
14
```
??? example "Pass tool schemas"
```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
@tool(parse_docstring=True)
def get_weather(location: str) -> str:
\"\"\"Get the current weather in a given location
Args:
location: The city and state, e.g. San Francisco, CA
\"\"\"
return "Sunny"
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
]
model.get_num_tokens_from_messages(messages, tools=[get_weather])
```
```txt
403
```
!!! warning "Behavior changed in `langchain-anthropic` 0.3.0"
Uses Anthropic's [token counting API](https://platform.claude.com/docs/en/build-with-claude/token-counting) to count tokens in messages.
""" # noqa: D214,E501
formatted_system, formatted_messages = _format_messages(messages)
if isinstance(formatted_system, str):
kwargs["system"] = formatted_system
if tools:
kwargs["tools"] = [convert_to_anthropic_tool(tool) for tool in tools]
if self.context_management is not None:
kwargs["context_management"] = self.context_management
if self.betas is not None:
beta_response = self._client.beta.messages.count_tokens(
betas=self.betas,
model=self.model,
messages=formatted_messages, # type: ignore[arg-type]
**kwargs,
)
return beta_response.input_tokens
response = self._client.messages.count_tokens(
model=self.model,
messages=formatted_messages, # type: ignore[arg-type]
**kwargs,
)
return response.input_tokens
def convert_to_anthropic_tool(
tool: dict[str, Any] | type | Callable | BaseTool,
*,
strict: bool | None = None,
) -> AnthropicTool:
"""Convert a tool-like object to an Anthropic tool definition.
Args:
tool: A tool-like object to convert. Can be an Anthropic tool dict,
a Pydantic model, a function, or a `BaseTool`.
strict: If `True`, enables strict schema adherence for the tool.
!!! note
Requires Claude Sonnet 4.5 or Opus 4.1 and the
`structured-outputs-2025-11-13` beta header.
Returns:
An Anthropic tool definition dict.
"""
# already in Anthropic tool format
if isinstance(tool, dict) and all(
k in tool for k in ("name", "description", "input_schema")
):
anthropic_formatted = AnthropicTool(tool) # type: ignore[misc]
else:
oai_formatted = convert_to_openai_tool(tool, strict=strict)["function"]
anthropic_formatted = AnthropicTool(
name=oai_formatted["name"],
input_schema=oai_formatted["parameters"],
)
if "description" in oai_formatted:
anthropic_formatted["description"] = oai_formatted["description"]
if "strict" in oai_formatted and isinstance(strict, bool):
anthropic_formatted["strict"] = oai_formatted["strict"]
return anthropic_formatted
def _tools_in_params(params: dict) -> bool:
return (
"tools" in params
or ("extra_body" in params and params["extra_body"].get("tools"))
or "mcp_servers" in params
)
def _thinking_in_params(params: dict) -> bool:
return params.get("thinking", {}).get("type") == "enabled"
def _documents_in_params(params: dict) -> bool:
for message in params.get("messages", []):
if isinstance(message.get("content"), list):
for block in message["content"]:
if (
isinstance(block, dict)
and block.get("type") == "document"
and block.get("citations", {}).get("enabled")
):
return True
return False
| ChatAnthropic |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 59673,
"end": 61148
} | class ____(ASTBase):
def __init__(
self, name: ASTNestedName, init: ASTInitializer | None, attrs: ASTAttributeList
) -> None:
self.name = name
self.init = init
self.attrs = attrs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTEnumerator):
return NotImplemented
return (
self.name == other.name
and self.init == other.init
and self.attrs == other.attrs
)
def __hash__(self) -> int:
return hash((self.name, self.init, self.attrs))
def get_id(self, version: int, objectType: str, symbol: Symbol) -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.name)]
if len(self.attrs) != 0:
res.extend((' ', transform(self.attrs)))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if len(self.attrs) != 0:
signode += addnodes.desc_sig_space()
self.attrs.describe_signature(signode)
if self.init:
self.init.describe_signature(signode, 'markType', env, symbol)
| ASTEnumerator |
python | astropy__astropy | astropy/modeling/tests/test_spline.py | {
"start": 11678,
"end": 53658
} | class ____:
def setup_class(self):
def func(x, noise=0):
return np.exp(-(x**2)) + 0.1 * noise
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17 * np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx - 4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17 * np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
MESSAGE = f"Knots: {knots} must be iterable or value"
with pytest.raises(ValueError, match=MESSAGE):
Spline1D(knots=knots)
# Not enough knots
MESSAGE = r"Must have at least 8 knots"
for idx in range(8):
with pytest.raises(ValueError, match=MESSAGE):
Spline1D(knots=np.arange(idx))
# Bad scipy spline
t = np.arange(20)[::-1]
MESSAGE = r"Knots must be in a non-decreasing order"
with pytest.raises(ValueError, match=MESSAGE):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
MESSAGE = r"The model parameters must be initialized before setting knots"
with pytest.raises(ValueError, match=MESSAGE):
spl.t = mk.MagicMock()
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = np.arange(18) + 15
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
MESSAGE = r"There must be exactly as many knots as previously defined"
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError, match=MESSAGE):
spl.t = np.arange(idx)
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
MESSAGE = r"The model parameters must be initialized before setting coeffs"
with pytest.raises(ValueError, match=MESSAGE):
spl.c = mk.MagicMock()
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = np.arange(18) + 15
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
MESSAGE = r"There must be exactly as many coeffs as previously defined"
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError, match=MESSAGE):
spl.c = np.arange(idx)
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5 * np.arange(16) + 11
c = 7 * np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
MESSAGE = r"tck has incompatible degree!"
with pytest.raises(ValueError, match=MESSAGE):
spl.tck = (t, c, 4)
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, "_create_parameters", autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c"),
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
MESSAGE = r"Must have at least 8 knots"
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError, match=MESSAGE):
spl._init_knots(knots, False, lower, upper)
# Error
spl = Spline1D()
assert spl._t is None
MESSAGE = r"Knots: 0.5 must be iterable or value"
with pytest.raises(ValueError, match=MESSAGE):
spl._init_knots(0.5, False, lower, upper)
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(
Spline1D,
"_init_bounds",
autospec=True,
return_value=(has_bounds, lower, upper),
) as mkBounds:
with mk.patch.object(Spline1D, "_init_knots", autospec=True) as mkKnots:
with mk.patch.object(
Spline1D, "_init_coeffs", autospec=True
) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, "bounds")
main.attach_mock(mkKnots, "knots")
main.attach_mock(mkCoeffs, "coeffs")
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs),
]
def test_evaluate(self):
spl = Spline1D()
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(
_Spline, "evaluate", autospec=True, return_value=new_kwargs
) as mkEval:
with mk.patch.object(
Spline1D, "bspline", new_callable=mk.PropertyMock
) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(
*args, **kwargs
)
assert mkBspline.return_value.call_args_list == [
mk.call(args[0], **new_kwargs)
]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)]
# Error
MESSAGE = r"Cannot evaluate a derivative of order higher than 4"
for idx in range(5, 8):
with mk.patch.object(
_Spline, "evaluate", autospec=True, return_value={"nu": idx}
):
with pytest.raises(RuntimeError, match=MESSAGE):
spl.evaluate(*args, **kwargs)
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info["spline"]._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info["spline"]._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[: len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t) :], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[: len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t) :], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info["resid"])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info["spline"](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
@pytest.mark.parametrize("w", wieght_tests)
@pytest.mark.parametrize("k", degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {"resid": None, "spline": None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info["spline"], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
@pytest.mark.parametrize("w", wieght_tests)
@pytest.mark.parametrize("k", degree_tests)
@pytest.mark.parametrize("s", smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {"resid": None, "spline": None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info["spline"], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
@pytest.mark.parametrize("w", wieght_tests)
@pytest.mark.parametrize("k", degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {"resid": None, "spline": None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]] * (k + 1), knots, [self.x[-1]] * (k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info["spline"], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info["spline"].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
MESSAGE = r"No knots have been provided"
with pytest.raises(RuntimeError, match=MESSAGE):
fitter(spl, self.x, self.y, weights=w)
@pytest.mark.parametrize("w", wieght_tests)
@pytest.mark.parametrize("k", degree_tests)
@pytest.mark.parametrize("s", smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {"fp": None, "ier": None, "msg": None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(
self.x, self.y, w=w, k=k, s=s, full_output=1
)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info["fp"] == spline_fp
assert fitter.fit_info["ier"] == spline_ier
assert fitter.fit_info["msg"] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize("w", wieght_tests)
@pytest.mark.parametrize("k", degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {"fp": None, "ier": None, "msg": None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]] * (k + 1), knots, [self.x[-1]] * (k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(
self.x, self.y, w=w, k=k, t=knots, full_output=1
)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info["fp"] == spline_fp
assert fitter.fit_info["ier"] == spline_ier
assert fitter.fit_info["msg"] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# With no knots present
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
def generate_spline(self, w=None, bbox=[None] * 2, k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1], k=k, s=s, t=t)
return BSpline(*tck)
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
MESSAGE = r"Must have nu <= 3"
for nu in range(4, 9):
with pytest.raises(ValueError, match=MESSAGE):
spl.derivative(nu=nu)
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
# Too many anti derivatives
for nu in range(3, 9):
MESSAGE = (
"Supported splines can have max degree 5, antiderivative degree will"
f" be {nu + 3}"
)
with pytest.raises(ValueError, match=MESSAGE):
spl.antiderivative(nu=nu)
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
MESSAGE = r"1D model can only have 2 data points"
with pytest.raises(ValueError, match=MESSAGE):
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
MESSAGE = r"Only spline models are compatible with this fitter"
with pytest.raises(ModelDefinitionError, match=MESSAGE):
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
MESSAGE = r"This has not been implemented for _SplineFitter"
with pytest.raises(NotImplementedError, match=MESSAGE):
fitter(spl, mk.MagicMock(), mk.MagicMock())
| TestSpline1D |
python | realpython__materials | arcade-platformer/arcade_platformer/10_view_conversion.py | {
"start": 839,
"end": 13432
} | class ____(arcade.View):
def __init__(self) -> None:
super().__init__()
# These lists will hold different sets of sprites
self.coins = None
self.background = None
self.walls = None
self.ladders = None
self.goals = None
self.enemies = None
# One sprite for the player, no more is needed
self.player = None
# We need a physics engine as well
self.physics_engine = None
# Someplace to keep score
self.score = 0
# Which level are we on?
self.level = 1
# Load up our sounds here
self.coin_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "coin.wav")
)
self.jump_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "jump.wav")
)
self.victory_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "victory.wav")
)
# Check if a joystick is connected
joysticks = arcade.get_joysticks()
if joysticks:
# If so, get the first one
self.joystick = joysticks[0]
self.joystick.open()
else:
# If not, flag it so we won't use it
print("There are no Joysticks")
self.joystick = None
def setup(self) -> None:
"""Sets up the game for the current level"""
# Get the current map based on the level
map_name = f"platform_level_{self.level:02}.tmx"
map_path = ASSETS_PATH / map_name
# What are the names of the layers?
wall_layer = "ground"
coin_layer = "coins"
goal_layer = "goal"
background_layer = "background"
ladders_layer = "ladders"
# Load the current map
map = arcade.tilemap.read_tmx(str(map_path))
# Load the layers
self.background = arcade.tilemap.process_layer(
map, layer_name=background_layer, scaling=MAP_SCALING
)
self.goals = arcade.tilemap.process_layer(
map, layer_name=goal_layer, scaling=MAP_SCALING
)
self.walls = arcade.tilemap.process_layer(
map, layer_name=wall_layer, scaling=MAP_SCALING
)
self.ladders = arcade.tilemap.process_layer(
map, layer_name=ladders_layer, scaling=MAP_SCALING
)
self.coins = arcade.tilemap.process_layer(
map, layer_name=coin_layer, scaling=MAP_SCALING
)
# Set the background color
background_color = arcade.color.FRESH_AIR
if map.background_color:
background_color = map.background_color
arcade.set_background_color(background_color)
# Find the edge of the map to control viewport scrolling
self.map_width = (map.map_size.width - 1) * map.tile_size.width
# Create the player sprite, if they're not already setup
if not self.player:
self.player = self.create_player_sprite()
# Move the player sprite back to the beginning
self.player.center_x = PLAYER_START_X
self.player.center_y = PLAYER_START_Y
self.player.change_x = 0
self.player.change_y = 0
# Reset the viewport
self.view_left = 0
self.view_bottom = 0
# Load the physics engine for this map
self.physics_engine = arcade.PhysicsEnginePlatformer(
player_sprite=self.player,
platforms=self.walls,
gravity_constant=GRAVITY,
ladders=self.ladders,
)
def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:
# Where are the player images stored?
texture_path = ASSETS_PATH / "images" / "player"
# Setup the appropriate textures
walking_paths = [
texture_path / f"alienGreen_walk{x}.png" for x in (1, 2)
]
climbing_paths = [
texture_path / f"alienGreen_climb{x}.png" for x in (1, 2)
]
standing_path = texture_path / "alienGreen_stand.png"
# Load them all now
walking_right_textures = [
arcade.load_texture(texture) for texture in walking_paths
]
walking_left_textures = [
arcade.load_texture(texture, mirrored=True)
for texture in walking_paths
]
walking_up_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
walking_down_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
standing_right_textures = [arcade.load_texture(standing_path)]
standing_left_textures = [
arcade.load_texture(standing_path, mirrored=True)
]
# Create the sprite
player = arcade.AnimatedWalkingSprite()
# Add the proper textures
player.stand_left_textures = standing_left_textures
player.stand_right_textures = standing_right_textures
player.walk_left_textures = walking_left_textures
player.walk_right_textures = walking_right_textures
player.walk_up_textures = walking_up_textures
player.walk_down_textures = walking_down_textures
# Set the player defaults
player.center_x = PLAYER_START_X
player.center_y = PLAYER_START_Y
player.state = arcade.FACE_RIGHT
# Set the initial texture
player.texture = player.stand_right_textures[0]
return player
def on_key_press(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- Which key was pressed
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [arcade.key.LEFT, arcade.key.J]:
self.player.change_x = -PLAYER_MOVE_SPEED
elif key in [arcade.key.RIGHT, arcade.key.L]:
self.player.change_x = PLAYER_MOVE_SPEED
# Check if player can climb up or down
elif key in [arcade.key.UP, arcade.key.I]:
if self.physics_engine.is_on_ladder():
self.player.change_y = PLAYER_MOVE_SPEED
elif key in [arcade.key.DOWN, arcade.key.K]:
if self.physics_engine.is_on_ladder():
self.player.change_y = -PLAYER_MOVE_SPEED
# Check if we can jump
elif key == arcade.key.SPACE:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
def on_key_release(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- The key which was released
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [
arcade.key.LEFT,
arcade.key.J,
arcade.key.RIGHT,
arcade.key.L,
]:
self.player.change_x = 0
# Check if player can climb up or down
elif key in [
arcade.key.UP,
arcade.key.I,
arcade.key.DOWN,
arcade.key.K,
]:
if self.physics_engine.is_on_ladder():
self.player.change_y = 0
def on_update(self, delta_time: float) -> None:
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
# First, check for joystick movement
if self.joystick:
# Check if we're in the dead zone
if abs(self.joystick.x) > DEAD_ZONE:
self.player.change_x = self.joystick.x * PLAYER_MOVE_SPEED
else:
self.player.change_x = 0
if abs(self.joystick.y) > DEAD_ZONE:
if self.physics_engine.is_on_ladder():
self.player.change_y = self.joystick.y * PLAYER_MOVE_SPEED
else:
self.player.change_y = 0
# Did the user press the jump button?
if self.joystick.buttons[0]:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Update the player animation
self.player.update_animation(delta_time)
# Update player movement based on the physics engine
self.physics_engine.update()
# Restrict user movement so they can't walk off screen
if self.player.left < 0:
self.player.left = 0
# Check if we've picked up a coin
coins_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.coins
)
for coin in coins_hit:
# Add the coin score to our score
self.score += int(coin.properties["point_value"])
# Play the coin sound
arcade.play_sound(self.coin_sound)
# Remove the coin
coin.remove_from_sprite_lists()
# Now check if we're at the ending goal
goals_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.goals
)
if goals_hit:
# Play the victory sound
self.victory_sound.play()
# Setup the next level
self.level += 1
self.setup()
# Set the viewport, scrolling if necessary
self.scroll_viewport()
def scroll_viewport(self) -> None:
"""Scrolls the viewport when the player gets close to the edges"""
# Scroll left
# Find the current left boundary
left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
# Are we to the left of this boundary? Then we should scroll left
if self.player.left < left_boundary:
self.view_left -= left_boundary - self.player.left
# But don't scroll past the left edge of the map
if self.view_left < 0:
self.view_left = 0
# Scroll right
# Find the current right boundary
right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN
# Are we right of this boundary? Then we should scroll right
if self.player.right > right_boundary:
self.view_left += self.player.right - right_boundary
# Don't scroll past the right edge of the map
if self.view_left > self.map_width - SCREEN_WIDTH:
self.view_left = self.map_width - SCREEN_WIDTH
# Scroll up
top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN
if self.player.top > top_boundary:
self.view_bottom += self.player.top - top_boundary
# Scroll down
bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
if self.player.bottom < bottom_boundary:
self.view_bottom -= bottom_boundary - self.player.bottom
# Only scroll to integers. Otherwise we end up with pixels that
# don't line up on the screen
self.view_bottom = int(self.view_bottom)
self.view_left = int(self.view_left)
# Do the scrolling
arcade.set_viewport(
left=self.view_left,
right=SCREEN_WIDTH + self.view_left,
bottom=self.view_bottom,
top=SCREEN_HEIGHT + self.view_bottom,
)
def on_draw(self) -> None:
arcade.start_render()
# Draw all the sprites
self.background.draw()
self.walls.draw()
self.coins.draw()
self.goals.draw()
self.ladders.draw()
self.player.draw()
# Draw the score in the lower left
score_text = f"Score: {self.score}"
# First a black background for a shadow effect
arcade.draw_text(
score_text,
start_x=10 + self.view_left,
start_y=10 + self.view_bottom,
color=arcade.csscolor.BLACK,
font_size=40,
)
# Now in white slightly shifted
arcade.draw_text(
score_text,
start_x=15 + self.view_left,
start_y=15 + self.view_bottom,
color=arcade.csscolor.WHITE,
font_size=40,
)
if __name__ == "__main__":
window = arcade.Window(
width=SCREEN_WIDTH, height=SCREEN_HEIGHT, title=SCREEN_TITLE
)
platform_view = PlatformerView()
platform_view.setup()
window.show_view(platform_view)
arcade.run()
| PlatformerView |
python | getsentry__sentry | src/sentry/monitors/validators.py | {
"start": 27419,
"end": 28420
} | class ____(serializers.ListField):
"""
Custom ListField that properly binds the Monitor instance to child validators.
When updating a detector, we need to ensure the MonitorDataSourceValidator
knows about the existing Monitor so slug validation works correctly.
"""
def to_internal_value(self, data):
# If we're updating (parent has instance), bind the Monitor instance to child validator
if hasattr(self.parent, "instance") and self.parent.instance:
detector = self.parent.instance
monitor = get_cron_monitor(detector)
# Bind the monitor instance so slug validation recognizes this as an update
# Type ignore: self.child is typed as Field but is actually MonitorDataSourceValidator
self.child.instance = monitor # type: ignore[attr-defined]
self.child.partial = self.parent.partial # type: ignore[attr-defined]
return super().to_internal_value(data)
| MonitorDataSourceListField |
python | h5py__h5py | h5py/tests/test_h5.py | {
"start": 396,
"end": 1216
} | class ____(TestCase):
def test_config(self):
cfg = h5.get_config()
self.assertIsInstance(cfg, h5.H5PYConfig)
cfg2 = h5.get_config()
self.assertIs(cfg, cfg2)
def test_cnames_get(self):
cfg = h5.get_config()
self.assertEqual(cfg.complex_names, ('r','i'))
def test_cnames_set(self):
self.addCleanup(fixnames)
cfg = h5.get_config()
cfg.complex_names = ('q','x')
self.assertEqual(cfg.complex_names, ('q','x'))
def test_cnames_set_exc(self):
self.addCleanup(fixnames)
cfg = h5.get_config()
with self.assertRaises(TypeError):
cfg.complex_names = ('q','i','v')
self.assertEqual(cfg.complex_names, ('r','i'))
def test_repr(self):
cfg = h5.get_config()
repr(cfg)
| TestH5 |
python | keras-team__keras | keras/src/metrics/hinge_metrics_test.py | {
"start": 1348,
"end": 2760
} | class ____(testing.TestCase):
def test_config(self):
sq_hinge_obj = hinge_metrics.SquaredHinge(
name="squared_hinge", dtype="int32"
)
self.assertEqual(sq_hinge_obj.name, "squared_hinge")
self.assertEqual(sq_hinge_obj._dtype, "int32")
# Check save and restore config
sq_hinge_obj2 = hinge_metrics.SquaredHinge.from_config(
sq_hinge_obj.get_config()
)
self.assertEqual(sq_hinge_obj2.name, "squared_hinge")
self.assertEqual(len(sq_hinge_obj2.variables), 2)
self.assertEqual(sq_hinge_obj2._dtype, "int32")
def test_unweighted(self):
sq_hinge_obj = hinge_metrics.SquaredHinge()
y_true = np.array([[0, 1, 0, 1], [0, 0, 1, 1]], dtype="float32")
y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
sq_hinge_obj.update_state(y_true, y_pred)
result = sq_hinge_obj.result()
self.assertAllClose(0.364, result, atol=1e-3)
def test_weighted(self):
sq_hinge_obj = hinge_metrics.SquaredHinge()
y_true = np.array([[-1, 1, -1, 1], [-1, -1, 1, 1]], dtype="float32")
y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
sample_weight = np.array([1.5, 2.0])
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.347, result, atol=1e-3)
| SquaredHingeTest |
python | getsentry__sentry | tests/sentry/integrations/slack/webhooks/events/test_message_im.py | {
"start": 1342,
"end": 6176
} | class ____(BaseEventTest, IntegratedApiTestCase):
def get_block_section_text(self, data):
blocks = data["blocks"]
return blocks[0]["text"]["text"], blocks[1]["text"]["text"]
@pytest.fixture(autouse=True)
def mock_chat_postMessage(self):
with patch(
"slack_sdk.web.client.WebClient.chat_postMessage",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/chat.postMessage",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self.mock_post:
yield
def test_identifying_channel_correctly(self) -> None:
event_data = orjson.loads(MESSAGE_IM_EVENT)
self.post_webhook(event_data=event_data)
data = self.mock_post.call_args[1]
assert data.get("channel") == event_data["channel"]
def test_user_message_im_notification_platform(self) -> None:
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT))
assert resp.status_code == 200, resp.content
data = self.mock_post.call_args[1]
heading, contents = self.get_block_section_text(data)
assert heading == "Unknown command: `helloo`"
assert (
contents
== "Here are the commands you can use. Commands not working? Re-install the app!"
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_user_message_link(self, mock_record: MagicMock) -> None:
"""
Test that when a user types in "link" to the DM we reply with the correct response.
"""
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT_LINK))
assert resp.status_code == 200, resp.content
data = self.mock_post.call_args[1]
assert "Link your Slack identity" in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
def test_user_message_already_linked_sdk(self) -> None:
"""
Test that when a user who has already linked their identity types in
"link" to the DM we reply with the correct response.
"""
with assume_test_silo_mode(SiloMode.CONTROL):
idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
Identity.objects.create(
external_id="UXXXXXXX1",
idp=idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT_LINK))
assert resp.status_code == 200, resp.content
data = self.mock_post.call_args[1]
assert "You are already linked" in get_response_text(data)
def test_user_message_unlink(self) -> None:
"""
Test that when a user types in "unlink" to the DM we reply with the correct response.
"""
with assume_test_silo_mode(SiloMode.CONTROL):
idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
Identity.objects.create(
external_id="UXXXXXXX1",
idp=idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT_UNLINK))
assert resp.status_code == 200, resp.content
data = self.mock_post.call_args[1]
assert "Click here to unlink your identity" in get_response_text(data)
def test_user_message_already_unlinked(self) -> None:
"""
Test that when a user without an Identity types in "unlink" to the DM we
reply with the correct response.
"""
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT_UNLINK))
assert resp.status_code == 200, resp.content
data = self.mock_post.call_args[1]
assert "You do not have a linked identity to unlink" in get_response_text(data)
def test_bot_message_im(self) -> None:
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_BOT_EVENT))
assert resp.status_code == 200, resp.content
def test_user_message_im_no_text(self) -> None:
resp = self.post_webhook(event_data=orjson.loads(MESSAGE_IM_EVENT_NO_TEXT))
assert resp.status_code == 200, resp.content
assert not self.mock_post.called
| MessageIMEventTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/test/__init__.py | {
"start": 11184,
"end": 14082
} | class ____(SqliteEventLogStorage, ConfigurableClass):
"""Sqlite is sorta supported for concurrency, as long as the rate of concurrent writes is tolerably
low. Officially, we should not support, but in the spirit of getting code coverage in the core
dagster package, let's mark it as that.
"""
__test__ = False
def __init__(
self,
base_dir: str,
sleep_interval: Optional[float] = None,
inst_data: Optional[ConfigurableClassData] = None,
):
self._sleep_interval = sleep_interval
self._check_calls = defaultdict(int)
self._records_for_run_calls = defaultdict(int)
super().__init__(base_dir, inst_data)
@classmethod
def config_type(cls) -> UserConfigSchema:
return {"base_dir": StringSource, "sleep_interval": Field(float, is_required=False)}
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: TestStorageConfig
) -> "ConcurrencyEnabledSqliteTestEventLogStorage":
return ConcurrencyEnabledSqliteTestEventLogStorage(inst_data=inst_data, **config_value)
@property
def supports_global_concurrency_limits(self) -> bool: # pyright: ignore[reportIncompatibleVariableOverride]
return True
def get_records_for_run(
self,
run_id: str,
cursor: Optional[str] = None,
of_type: Optional[Union[DagsterEventType, set[DagsterEventType]]] = None,
limit: Optional[int] = None,
ascending: bool = True,
) -> EventLogConnection:
self._records_for_run_calls[run_id] = self._records_for_run_calls[run_id] + 1
return super().get_records_for_run(run_id, cursor, of_type, limit, ascending)
def get_check_calls(self, step_key: str) -> int:
return self._check_calls[step_key]
def get_records_for_run_calls(self, run_id: str) -> int:
return self._records_for_run_calls[run_id]
def check_concurrency_claim(
self, concurrency_key: str, run_id: str, step_key: str
) -> ConcurrencyClaimStatus:
self._check_calls[step_key] += 1
claim_status = super().check_concurrency_claim(concurrency_key, run_id, step_key)
if not self._sleep_interval:
return claim_status
return claim_status.with_sleep_interval(float(self._sleep_interval))
def get_all_direct_subclasses_of_marker(marker_interface_cls: type) -> list[type]:
import dagster as dagster
return [
symbol
for symbol in dagster.__dict__.values()
if isinstance(symbol, type)
and issubclass(symbol, marker_interface_cls)
and marker_interface_cls
in symbol.__bases__ # ensure that the class is a direct subclass of marker_interface_cls (not a subclass of a subclass)
]
| ConcurrencyEnabledSqliteTestEventLogStorage |
python | scikit-learn__scikit-learn | sklearn/exceptions.py | {
"start": 4084,
"end": 4385
} | class ____(UserWarning):
"""Warning class used to notify the user of a test that was skipped.
For example, one of the estimator checks requires a pandas import.
If the pandas package cannot be imported, the test will be skipped rather
than register as a failure.
"""
| SkipTestWarning |
python | getsentry__sentry-python | sentry_sdk/integrations/arq.py | {
"start": 1167,
"end": 7881
} | class ____(Integration):
identifier = "arq"
origin = f"auto.queue.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
try:
if isinstance(ARQ_VERSION, str):
version = parse_version(ARQ_VERSION)
else:
version = ARQ_VERSION.version[:2]
except (TypeError, ValueError):
version = None
_check_minimum_version(ArqIntegration, version)
patch_enqueue_job()
patch_run_job()
patch_create_worker()
ignore_logger("arq.worker")
def patch_enqueue_job():
# type: () -> None
old_enqueue_job = ArqRedis.enqueue_job
original_kwdefaults = old_enqueue_job.__kwdefaults__
async def _sentry_enqueue_job(self, function, *args, **kwargs):
# type: (ArqRedis, str, *Any, **Any) -> Optional[Job]
integration = sentry_sdk.get_client().get_integration(ArqIntegration)
if integration is None:
return await old_enqueue_job(self, function, *args, **kwargs)
with sentry_sdk.start_span(
op=OP.QUEUE_SUBMIT_ARQ, name=function, origin=ArqIntegration.origin
):
return await old_enqueue_job(self, function, *args, **kwargs)
_sentry_enqueue_job.__kwdefaults__ = original_kwdefaults
ArqRedis.enqueue_job = _sentry_enqueue_job
def patch_run_job():
# type: () -> None
old_run_job = Worker.run_job
async def _sentry_run_job(self, job_id, score):
# type: (Worker, str, int) -> None
integration = sentry_sdk.get_client().get_integration(ArqIntegration)
if integration is None:
return await old_run_job(self, job_id, score)
with sentry_sdk.isolation_scope() as scope:
scope._name = "arq"
scope.clear_breadcrumbs()
transaction = Transaction(
name="unknown arq task",
status="ok",
op=OP.QUEUE_TASK_ARQ,
source=TransactionSource.TASK,
origin=ArqIntegration.origin,
)
with sentry_sdk.start_transaction(transaction):
return await old_run_job(self, job_id, score)
Worker.run_job = _sentry_run_job
def _capture_exception(exc_info):
# type: (ExcInfo) -> None
scope = sentry_sdk.get_current_scope()
if scope.transaction is not None:
if exc_info[0] in ARQ_CONTROL_FLOW_EXCEPTIONS:
scope.transaction.set_status(SPANSTATUS.ABORTED)
return
scope.transaction.set_status(SPANSTATUS.INTERNAL_ERROR)
event, hint = event_from_exception(
exc_info,
client_options=sentry_sdk.get_client().options,
mechanism={"type": ArqIntegration.identifier, "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
def _make_event_processor(ctx, *args, **kwargs):
# type: (Dict[Any, Any], *Any, **Any) -> EventProcessor
def event_processor(event, hint):
# type: (Event, Hint) -> Optional[Event]
with capture_internal_exceptions():
scope = sentry_sdk.get_current_scope()
if scope.transaction is not None:
scope.transaction.name = ctx["job_name"]
event["transaction"] = ctx["job_name"]
tags = event.setdefault("tags", {})
tags["arq_task_id"] = ctx["job_id"]
tags["arq_task_retry"] = ctx["job_try"] > 1
extra = event.setdefault("extra", {})
extra["arq-job"] = {
"task": ctx["job_name"],
"args": (
args if should_send_default_pii() else SENSITIVE_DATA_SUBSTITUTE
),
"kwargs": (
kwargs if should_send_default_pii() else SENSITIVE_DATA_SUBSTITUTE
),
"retry": ctx["job_try"],
}
return event
return event_processor
def _wrap_coroutine(name, coroutine):
# type: (str, WorkerCoroutine) -> WorkerCoroutine
async def _sentry_coroutine(ctx, *args, **kwargs):
# type: (Dict[Any, Any], *Any, **Any) -> Any
integration = sentry_sdk.get_client().get_integration(ArqIntegration)
if integration is None:
return await coroutine(ctx, *args, **kwargs)
sentry_sdk.get_isolation_scope().add_event_processor(
_make_event_processor({**ctx, "job_name": name}, *args, **kwargs)
)
try:
result = await coroutine(ctx, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
_capture_exception(exc_info)
reraise(*exc_info)
return result
return _sentry_coroutine
def patch_create_worker():
# type: () -> None
old_create_worker = arq.worker.create_worker
@ensure_integration_enabled(ArqIntegration, old_create_worker)
def _sentry_create_worker(*args, **kwargs):
# type: (*Any, **Any) -> Worker
settings_cls = args[0]
if isinstance(settings_cls, dict):
if "functions" in settings_cls:
settings_cls["functions"] = [
_get_arq_function(func)
for func in settings_cls.get("functions", [])
]
if "cron_jobs" in settings_cls:
settings_cls["cron_jobs"] = [
_get_arq_cron_job(cron_job)
for cron_job in settings_cls.get("cron_jobs", [])
]
if hasattr(settings_cls, "functions"):
settings_cls.functions = [
_get_arq_function(func) for func in settings_cls.functions
]
if hasattr(settings_cls, "cron_jobs"):
settings_cls.cron_jobs = [
_get_arq_cron_job(cron_job)
for cron_job in (settings_cls.cron_jobs or [])
]
if "functions" in kwargs:
kwargs["functions"] = [
_get_arq_function(func) for func in kwargs.get("functions", [])
]
if "cron_jobs" in kwargs:
kwargs["cron_jobs"] = [
_get_arq_cron_job(cron_job) for cron_job in kwargs.get("cron_jobs", [])
]
return old_create_worker(*args, **kwargs)
arq.worker.create_worker = _sentry_create_worker
def _get_arq_function(func):
# type: (Union[str, Function, WorkerCoroutine]) -> Function
arq_func = arq.worker.func(func)
arq_func.coroutine = _wrap_coroutine(arq_func.name, arq_func.coroutine)
return arq_func
def _get_arq_cron_job(cron_job):
# type: (CronJob) -> CronJob
cron_job.coroutine = _wrap_coroutine(cron_job.name, cron_job.coroutine)
return cron_job
| ArqIntegration |
python | getsentry__sentry | src/sentry/search/events/datasets/spans_indexed.py | {
"start": 1034,
"end": 22504
} | class ____(DatasetConfig):
optimize_wildcard_searches = True
subscriptables_with_index = {"tags"}
non_nullable_keys = {"id", "span_id", "trace", "trace_id"}
def __init__(self, builder: BaseQueryBuilder):
self.builder = builder
self.total_count: int | None = None
self.total_sum_transaction_duration: float | None = None
@property
def search_filter_converter(
self,
) -> dict[str, Callable[[SearchFilter], WhereType | None]]:
return {
"message": self._message_filter_converter,
constants.PROJECT_ALIAS: self._project_slug_filter_converter,
constants.PROJECT_NAME_ALIAS: self._project_slug_filter_converter,
constants.DEVICE_CLASS_ALIAS: self._device_class_filter_converter,
constants.SPAN_IS_SEGMENT_ALIAS: filter_aliases.span_is_segment_converter,
constants.SPAN_OP: lambda search_filter: filter_aliases.lowercase_search(
self.builder, search_filter
),
constants.SPAN_MODULE_ALIAS: lambda search_filter: filter_aliases.span_module_filter_converter(
self.builder, search_filter
),
constants.SPAN_STATUS: lambda search_filter: filter_aliases.span_status_filter_converter(
self.builder, search_filter
),
}
@property
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
return {
constants.PROJECT_ALIAS: self._resolve_project_slug_alias,
constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,
constants.SPAN_MODULE_ALIAS: self._resolve_span_module,
constants.DEVICE_CLASS_ALIAS: lambda alias: field_aliases.resolve_device_class(
self.builder, alias
),
"span.duration": self._resolve_span_duration,
constants.PRECISE_FINISH_TS: lambda alias: field_aliases.resolve_precise_timestamp(
Column("end_timestamp"), Column("end_ms"), alias
),
constants.PRECISE_START_TS: lambda alias: field_aliases.resolve_precise_timestamp(
Column("start_timestamp"), Column("start_ms"), alias
),
constants.USER_DISPLAY_ALIAS: lambda alias: field_aliases.resolve_user_display_alias(
self.builder, alias
),
constants.REPLAY_ALIAS: lambda alias: field_aliases.resolve_replay_alias(
self.builder, alias
),
}
@property
def function_converter(self) -> dict[str, SnQLFunction]:
function_converter = {
function.name: function
for function in [
SnQLFunction(
"count",
optional_args=[NullColumn("column")],
snql_aggregate=lambda _, alias: Function(
"count",
[],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"count_unique",
required_args=[ColumnTagArg("column")],
snql_aggregate=lambda args, alias: Function("uniq", [args["column"]], alias),
default_result_type="integer",
),
SnQLFunction(
"sum",
required_args=[NumericColumn("column", spans=True)],
snql_aggregate=lambda args, alias: Function("sum", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
SnQLFunction(
"avg",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: Function("avg", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"percentile",
required_args=[
NumericColumn("column", spans=True),
NumberRange("percentile", 0, 1),
],
snql_aggregate=self._resolve_percentile,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction( # deprecated in favour of `example()`
"bounded_sample",
required_args=[
NumericColumn("column", spans=True),
NumberRange("min", None, None),
],
optional_args=[with_default(None, NullableNumberRange("max", None, None))],
snql_aggregate=self._resolve_bounded_sample,
default_result_type="string",
),
SnQLFunction( # deprecated in favour of `rounded_timestamp(...)`
"rounded_time",
optional_args=[with_default(3, NumberRange("intervals", None, None))],
snql_column=self._resolve_rounded_time,
default_result_type="integer",
),
SnQLFunction(
"p50",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.5),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p75",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.75),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p90",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.90),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p95",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.95),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p99",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.99),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p100",
optional_args=[
with_default("span.duration", NumericColumn("column", spans=True)),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 1),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"eps",
snql_aggregate=lambda args, alias: function_aliases.resolve_eps(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
SnQLFunction(
"epm",
snql_aggregate=lambda args, alias: function_aliases.resolve_epm(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
SnQLFunction(
"any",
required_args=[SnQLFieldColumn("column")],
# Not actually using `any` so that this function returns consistent results
snql_aggregate=lambda args, alias: Function("min", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
redundant_grouping=True,
),
SnQLFunction(
"examples",
required_args=[NumericColumn("column", spans=True)],
optional_args=[with_default(1, NumberRange("count", 1, None))],
snql_aggregate=self._resolve_random_samples,
private=True,
),
SnQLFunction(
"rounded_timestamp",
required_args=[IntervalDefault("interval", 1, None)],
snql_column=lambda args, alias: function_aliases.resolve_rounded_timestamp(
args["interval"], alias
),
private=True,
),
SnQLFunction(
"min",
required_args=[NumericColumn("column", spans=True)],
snql_aggregate=lambda args, alias: Function("min", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"max",
required_args=[NumericColumn("column", spans=True)],
snql_aggregate=lambda args, alias: Function("max", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"trace_name",
snql_aggregate=lambda args, alias: Function(
"anyIf",
[
Column("segment_name"),
Function(
"or",
[
Function("isNull", [Column("parent_span_id")]),
Function("equals", [Column("parent_span_id"), "00"]),
],
),
],
alias,
),
default_result_type="string",
private=True,
),
SnQLFunction(
"first_seen",
snql_aggregate=lambda args, alias: Function(
"plus",
[
Function(
"multiply",
[
Function(
"toUInt64",
[
self._resolve_partial_timestamp_column(
"min",
"start_timestamp",
"start_ms",
1,
),
],
),
1000,
],
),
self._resolve_partial_timestamp_column(
"min",
"start_timestamp",
"start_ms",
2,
),
],
alias,
),
default_result_type="duration",
private=True,
),
SnQLFunction(
"last_seen",
snql_aggregate=lambda args, alias: Function(
"plus",
[
Function(
"multiply",
[
Function(
"toUInt64",
[
self._resolve_partial_timestamp_column(
"max",
"end_timestamp",
"end_ms",
1,
),
],
),
1000,
],
),
self._resolve_partial_timestamp_column(
"max",
"end_timestamp",
"end_ms",
2,
),
],
alias,
),
default_result_type="duration",
private=True,
),
SnQLFunction(
"array_join",
required_args=[ColumnArg("column", allowed_columns=["tags.key"])],
snql_column=lambda args, alias: Function("arrayJoin", [args["column"]], alias),
default_result_type="string",
private=True,
),
]
}
for alias, name in constants.SPAN_FUNCTION_ALIASES.items():
if name in function_converter:
function_converter[alias] = function_converter[name].alias_as(alias)
return function_converter
@property
def orderby_converter(self) -> Mapping[str, Callable[[Direction], OrderBy]]:
return {}
def _message_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.message_filter_converter(self.builder, search_filter)
def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.project_slug_converter(self.builder, search_filter)
def _device_class_filter_converter(self, search_filter: SearchFilter) -> SelectType:
return filter_aliases.device_class_converter(
self.builder, search_filter, {**DEVICE_CLASS, "Unknown": {""}}
)
def _resolve_project_slug_alias(self, alias: str) -> SelectType:
return field_aliases.resolve_project_slug_alias(self.builder, alias)
def _resolve_span_module(self, alias: str) -> SelectType:
return field_aliases.resolve_span_module(self.builder, alias)
def _resolve_span_duration(self, alias: str) -> SelectType:
# In ClickHouse, duration is an UInt32 whereas self time is a Float64.
# This creates a situation where a sub-millisecond duration is truncated
# to but the self time is not.
#
# To remedy this, we take the greater of the duration and self time as
# this is the only situation where the self time can be greater than
# the duration.
#
# Also avoids strange situations on the frontend where duration is less
# than the self time.
duration = Column("duration")
self_time = self.builder.column("span.self_time")
return Function(
"if",
[
Function("greater", [self_time, duration]),
self_time,
duration,
],
alias,
)
def _resolve_bounded_sample(
self,
args: Mapping[str, str | SelectType | int | float],
alias: str,
) -> SelectType:
base_condition = Function(
"and",
[
Function("greaterOrEquals", [args["column"], args["min"]]),
Function(
"greater",
[
Function(
"position",
[
Function("toString", [Column("span_id")]),
Function(
"substring",
[Function("toString", [Function("rand", [])]), 1, 2],
),
],
),
0,
],
),
],
)
if args["max"] is not None:
condition = Function(
"and", [base_condition, Function("less", [args["column"], args["max"]])]
)
else:
condition = base_condition
return Function("minIf", [self.builder.column("id"), condition], alias)
def _resolve_rounded_time(
self,
args: Mapping[str, str | SelectType | int | float],
alias: str,
) -> SelectType:
start, end = self.builder.start, self.builder.end
intervals = args["intervals"]
if start is None or end is None:
raise InvalidSearchQuery("Need start and end to use rounded_time column")
if not isinstance(intervals, (int, float)):
raise InvalidSearchQuery("intervals must be a number")
return Function(
"floor",
[
Function(
"divide",
[
Function("minus", [end, self.builder.column("timestamp")]),
((end - start) / intervals).total_seconds(),
],
)
],
alias,
)
def _resolve_percentile(
self,
args: Mapping[str, str | SelectType | int | float],
alias: str,
fixed_percentile: float | None = None,
) -> SelectType:
return (
Function(
"max",
[args["column"]],
alias,
)
if fixed_percentile == 1
else Function(
f'quantile({fixed_percentile if fixed_percentile is not None else args["percentile"]})',
[args["column"]],
alias,
)
)
def _resolve_random_samples(
self,
args: Mapping[str, str | SelectType | int | float],
alias: str,
) -> SelectType:
offset = 0 if self.builder.offset is None else self.builder.offset.offset
limit = 0 if self.builder.limit is None else self.builder.limit.limit
return function_aliases.resolve_random_samples(
[
# DO NOT change the order of these columns as it
# changes the order of the tuple in the response
# which WILL cause errors where it assumes this
# order
self.builder.resolve_column("span.group"),
self.builder.resolve_column("timestamp"),
self.builder.resolve_column("id"),
args["column"],
],
alias,
offset,
limit,
size=int(args["count"]),
)
def _resolve_partial_timestamp_column(
self, aggregate: str, timestamp_column: str, ms_column: str, index: int
) -> SelectType:
return Function(
"tupleElement",
[
Function(
aggregate,
[
Function(
"tuple",
[Column(timestamp_column), Column(ms_column)],
),
],
),
index,
],
)
| SpansIndexedDatasetConfig |
python | pytorch__pytorch | test/dynamo/test_frame_init.py | {
"start": 2057,
"end": 4568
} | class ____(torch._dynamo.test_case.TestCase):
def test_frame_init(self):
code_map1 = {
target_with_varargs.__code__: varargs_code1.__code__,
target_with_varkwargs.__code__: varkwargs_code1.__code__,
}
code_map2 = {
target_with_varargs.__code__: varargs_code2.__code__,
target_with_varkwargs.__code__: varkwargs_code2.__code__,
}
empty_guard_manager = torch._dynamo.guards.GuardManagerWrapper()
def callback1(frame, cache_entry, frame_state):
if frame.f_code in code_map1:
transformed_code = code_map1[frame.f_code]
return wrap_guarded_code(
GuardedCode(
transformed_code,
empty_guard_manager,
CompileId(
frame_id=None, frame_compile_id=0, compiled_autograd_id=0
),
)
)
return ConvertFrameReturn()
def callback2(frame, cache_entry, frame_state):
if frame.f_code in code_map2:
transformed_code = code_map2[frame.f_code]
return wrap_guarded_code(
GuardedCode(
transformed_code,
empty_guard_manager,
CompileId(
frame_id=None, frame_compile_id=0, compiled_autograd_id=0
),
)
)
return ConvertFrameReturn()
for _ in [callback1, callback2]:
torch._dynamo.reset()
expected_varargs_output = target_with_varargs(
1, 2, 3, 4, name1=1, name2=2, name3=3
)
expected_kwargs_output = target_with_varkwargs(
1, 2, keyword_only_arg=1, name2=2, name3=3
)
original = set_eval_frame(callback1)
real_varargs_output = target_with_varargs(
1, 2, 3, 4, name1=1, name2=2, name3=3
)
real_kwargs_output = target_with_varkwargs(
1, 2, keyword_only_arg=1, name2=2, name3=3
)
self.assertEqual(real_varargs_output, expected_varargs_output)
self.assertEqual(real_kwargs_output, expected_kwargs_output)
set_eval_frame(original)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| FrameInitTests |
python | gevent__gevent | src/greentest/3.12/test_socket.py | {
"start": 94825,
"end": 96277
} | class ____(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
@unittest.skipUnless(HAVE_SOCKET_HYPERV,
'Hyper-V sockets required for this test.')
| BasicBluetoothTest |
python | fluentpython__example-code-2e | 24-class-metaprog/autoconst/autoconst.py | {
"start": 410,
"end": 508
} | class ____(type):
def __prepare__(name, bases, **kwargs):
return WilyDict()
| AutoConstMeta |
python | sympy__sympy | sympy/core/tests/test_expr.py | {
"start": 7405,
"end": 7526
} | class ____(Basic, NonBasic):
'''Like NonBasic above except this is a subclass of Basic but not Expr'''
pass
| NonExpr |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_string.py | {
"start": 11154,
"end": 16610
} | class ____:
def test_to_string_float_format_no_fixed_width(self):
# GH#21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH#22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n0 1.500000e+000\n1 1.000000e-017\n2 -5.500000e-007"
)
else:
expected = (
" a\n0 1.500000e+00\n1 1.000000e-17\n2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
# TODO: assert that these match??
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_complex_float_formatting_with_exponents(self):
# GH #60393
with option_context("display.precision", 6):
df = DataFrame(
{
"x": [
(1.8816e-09 + 0j),
(1.8816e-09 + 3.39676e-09j),
]
}
)
result = df.to_string()
expected = (
" x\n0 1.881600e-09+0.000000e+00j\n"
"1 1.881600e-09+3.396760e-09j"
)
assert result == expected
def test_to_string_format_inf(self):
# GH#24861
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_float_formatting(self):
with option_context(
"display.precision",
5,
"display.notebook_repr_html",
False,
):
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
| TestToStringNumericFormatting |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 16366,
"end": 16465
} | class ____(IterableExportStreamAdjustableRange):
data_field = "emailUnsubscribe"
| EmailUnsubscribe |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_flex_test.py | {
"start": 13808,
"end": 19015
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(('DefaultMode', 'DEFAULT'),
('LegacyIntegerMode', 'LEGACY_INTEGER'))
def testAddOp(self, tf_quantization_mode):
root = autotrackable.AutoTrackable()
root.add_func = def_function.function(lambda x: x + x)
input_data = tf.reshape(tf.range(4, dtype=tf.float32), [1, 4])
concrete_func = root.add_func.get_concrete_function(input_data)
# Convert model and check if the op is not flex.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter._experimental_tf_quantization_mode = tf_quantization_mode
tflite_model = converter.convert()
self.assertTrue(tflite_model)
if tf_quantization_mode == 'LEGACY_INTEGER':
self.assertIn('ADD', tflite_test_util.get_ops_list(tflite_model))
else:
self.assertIn('FlexAddV2', tflite_test_util.get_ops_list(tflite_model))
# Check the model works.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
@parameterized.named_parameters(('DefaultMode', 'DEFAULT'),
('LegacyIntegerMode', 'LEGACY_INTEGER'))
def testL2LossOp(self, tf_quantization_mode):
root = autotrackable.AutoTrackable()
root.l2_loss_func = def_function.function(lambda x: nn_ops.l2_loss(x)) # pylint: disable=unnecessary-lambda
input_data = tf.range(4, dtype=tf.float32)
concrete_func = root.l2_loss_func.get_concrete_function(input_data)
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter._experimental_tf_quantization_mode = tf_quantization_mode
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertIn('FlexL2Loss', tflite_test_util.get_ops_list(tflite_model))
# Check the model works.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([15.0], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
@parameterized.named_parameters(('DefaultMode', 'DEFAULT'),
('LegacyIntegerMode', 'LEGACY_INTEGER'))
def testConvOpWithBias(self, tf_quantization_mode):
class ConvModel(autotrackable.AutoTrackable):
@def_function.function
def conv_func(self, in_tensor, filter_tensor):
bias = constant_op.constant(3., shape=[1])
conv_tensor = tf.nn.conv2d(
in_tensor,
filter_tensor,
strides=[1, 1, 1, 1],
dilations=[1, 1, 1, 1],
padding='VALID',
data_format='NHWC')
conv_tensor = conv_tensor + bias
return tf.nn.relu(conv_tensor)
root = ConvModel()
input_data = tf.reshape(tf.range(4, dtype=tf.float32), [1, 2, 2, 1])
filter_data = tf.reshape(tf.range(2, dtype=tf.float32), [1, 2, 1, 1])
concrete_func = root.conv_func.get_concrete_function(
input_data, filter_data)
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter._experimental_tf_quantization_mode = tf_quantization_mode
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertCountEqual(['CONV_2D', 'RESHAPE'],
tflite_test_util.get_ops_list(tflite_model))
# Check the model works.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape(
(1, 2, 2, 1))
interpreter.set_tensor(input_details[0]['index'], test_input)
test_filter = np.array([1.0, 0.0], dtype=np.float32).reshape((1, 2, 1, 1))
interpreter.set_tensor(input_details[1]['index'], test_filter)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[[[4.]], [[6.]]]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
if __name__ == '__main__':
test.main()
| TFQuantizationTest |
python | davidhalter__jedi | test/completion/invalid.py | {
"start": 2258,
"end": 2718
} | class ____():
def foo(self):
# This construct contains two places where Jedi with Python 3 can fail.
# It should just ignore those constructs and still execute `bar`.
pass
if 2:
try:
pass
except ValueError, e:
raise TypeError, e
else:
pass
def bar(self):
self.x = 3
return ''
#? str()
BrokenPartsOfClass().bar()
| BrokenPartsOfClass |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 28495,
"end": 29076
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
| CaninePooler |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_strategy_test.py | {
"start": 23053,
"end": 27612
} | class ____(test.TestCase):
def testExecutingEagerlyOutsideFunction(self, distribution):
"""Verify we preserve the value of executing_eagerly_outside_functions()."""
def model_fn():
return ops.executing_eagerly_outside_functions()
originally = ops.executing_eagerly_outside_functions()
with distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
# Verify this all again, but this time in a FuncGraph.
with func_graph.FuncGraph("fg").as_default(), distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
def testFunctionInCallForEachReplica(self, distribution):
traces = []
@def_function.function
def model_fn():
traces.append(1)
return distribute_lib.get_replica_context().replica_id_in_sync_group
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(
(0, 1),
self.evaluate(distribution.experimental_local_results(result)))
self.assertLen(traces, distribution.num_replicas_in_sync)
def testFunctionInCallForEachReplicaInsideAnotherFunction(self, distribution):
traces = []
@def_function.function
def model_fn():
traces.append(1)
return distribute_lib.get_replica_context().replica_id_in_sync_group
@def_function.function
def step():
return distribution.extended.call_for_each_replica(model_fn)
with distribution.scope():
result = step()
self.assertEqual(
(0, 1),
self.evaluate(distribution.experimental_local_results(result)))
self.assertLen(traces, distribution.num_replicas_in_sync)
def testControlFlowFunctionInCallForEachReplicaWithMergeCall(
self, distribution):
def merge_fn(strategy, value):
return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
@def_function.function
def model_fn():
def body_fn(i):
return distribute_lib.get_replica_context().merge_call(
merge_fn, args=(i,))
return while_loop.while_loop_v2(lambda i: i < 2, body_fn, [0])
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn)
def testNestedFunctionInCallForEachReplicaWithMergeCall(self, distribution):
def merge_fn(strategy, value):
return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
def model_fn():
@def_function.function
def model_fn_nested():
t = constant_op.constant(1)
return distribute_lib.get_replica_context().merge_call(
merge_fn, args=(t,))
return model_fn_nested()
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn)
def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
def merge_fn(_):
pass
@def_function.function
def model_fn():
distribute_lib.get_replica_context().merge_call(merge_fn)
return 0.
with distribution.scope():
self.assertEqual(
self.evaluate(distribution.extended.call_for_each_replica(model_fn)),
0.)
def testFunctionInCallForEachReplicaCached(self, distribution):
traces = []
@def_function.function
def model_fn():
traces.append(None)
self.assertEmpty(traces)
for i in range(10):
distribution.extended.call_for_each_replica(model_fn)
if i == 0:
num_devices = len(traces)
self.assertGreater(num_devices, 0)
else:
# model_fn should not have been re-evaluated so the length should remain
# the same.
self.assertLen(traces, num_devices)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"]))
| MirroredStrategyCallForEachReplicaTest |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py | {
"start": 20994,
"end": 24429
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if config.position_embeddings_type == "relative":
self.embed_positions = Wav2Vec2BertRelPositionalEmbedding(config)
elif config.position_embeddings_type == "rotary":
self.embed_positions = Wav2Vec2BertRotaryPositionalEmbedding(config)
else:
self.embed_positions = None
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([Wav2Vec2BertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
conv_attention_mask = attention_mask
if attention_mask is not None:
# make sure padded tokens output 0
hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
# extend attention_mask
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
hidden_states = self.dropout(hidden_states)
if self.embed_positions is not None:
relative_position_embeddings = self.embed_positions(hidden_states)
else:
relative_position_embeddings = None
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
relative_position_embeddings=relative_position_embeddings,
output_attentions=output_attentions,
conv_attention_mask=conv_attention_mask,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
| Wav2Vec2BertEncoder |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 24904,
"end": 36861
} | class ____(jsonschema.ValidationError):
_JS_TO_PY: ClassVar[Mapping[str, str]] = {
"boolean": "bool",
"integer": "int",
"number": "float",
"string": "str",
"null": "None",
"object": "Mapping[str, Any]",
"array": "Sequence",
}
def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None:
"""
A wrapper for ``jsonschema.ValidationError`` with friendlier traceback.
Parameters
----------
obj
The instance that failed ``self.validate(...)``.
err
The original ``ValidationError``.
Notes
-----
We do not raise `from err` as else the resulting traceback is very long
as it contains part of the Vega-Lite schema.
It would also first show the less helpful `ValidationError` instead of
the more user friendly `SchemaValidationError`.
"""
super().__init__(**err._contents())
self.obj = obj
self._errors: GroupedValidationErrors = getattr(
err, "_all_errors", {getattr(err, "json_path", _json_path(err)): [err]}
)
# This is the message from err
self._original_message = self.message
self.message = self._get_message()
def __str__(self) -> str:
return self.message
def _get_message(self) -> str:
def indent_second_line_onwards(message: str, indent: int = 4) -> str:
modified_lines: list[str] = []
for idx, line in enumerate(message.split("\n")):
if idx > 0 and len(line) > 0:
line = " " * indent + line
modified_lines.append(line)
return "\n".join(modified_lines)
error_messages: list[str] = []
# Only show a maximum of 3 errors as else the final message returned by this
# method could get very long.
for errors in list(self._errors.values())[:3]:
error_messages.append(self._get_message_for_errors_group(errors))
message = ""
if len(error_messages) > 1:
error_messages = [
indent_second_line_onwards(f"Error {error_id}: {m}")
for error_id, m in enumerate(error_messages, start=1)
]
message += "Multiple errors were found.\n\n"
message += "\n\n".join(error_messages)
return message
def _get_message_for_errors_group(
self,
errors: ValidationErrorList,
) -> str:
if errors[0].validator == "additionalProperties":
# During development, we only found cases where an additionalProperties
# error was raised if that was the only error for the offending instance
# as identifiable by the json path. Therefore, we just check here the first
# error. However, other constellations might exist in which case
# this should be adapted so that other error messages are shown as well.
message = self._get_additional_properties_error_message(errors[0])
else:
message = self._get_default_error_message(errors=errors)
return message.strip()
def _get_additional_properties_error_message(
self,
error: jsonschema.exceptions.ValidationError,
) -> str:
"""Output all existing parameters when an unknown parameter is specified."""
altair_cls = self._get_altair_class_for_error(error)
param_dict_keys = inspect.signature(altair_cls).parameters.keys()
param_names_table = self._format_params_as_table(param_dict_keys)
# Error messages for these errors look like this:
# "Additional properties are not allowed ('unknown' was unexpected)"
# Line below extracts "unknown" from this string
parameter_name = error.message.split("('")[-1].split("'")[0]
message = f"""\
`{altair_cls.__name__}` has no parameter named '{parameter_name}'
Existing parameter names are:
{param_names_table}
See the help for `{altair_cls.__name__}` to read the full description of these parameters"""
return message
def _get_altair_class_for_error(
self, error: jsonschema.exceptions.ValidationError
) -> type[SchemaBase]:
"""
Try to get the lowest class possible in the chart hierarchy so it can be displayed in the error message.
This should lead to more informative error messages pointing the user closer to the source of the issue.
If we did not find a suitable class based on traversing the path so we fall
back on the class of the top-level object which created the SchemaValidationError
"""
from altair import vegalite
for prop_name in reversed(error.absolute_path):
# Check if str as e.g. first item can be a 0
if isinstance(prop_name, str):
candidate = prop_name[0].upper() + prop_name[1:]
if tp := getattr(vegalite, candidate, None):
return _maybe_channel(tp, self.instance)
return type(self.obj)
@staticmethod
def _format_params_as_table(param_dict_keys: Iterable[str]) -> str:
"""Format param names into a table so that they are easier to read."""
param_names: tuple[str, ...]
name_lengths: tuple[int, ...]
param_names, name_lengths = zip(
*[
(name, len(name))
for name in param_dict_keys
if name not in {"kwds", "self"}
]
)
# Worst case scenario with the same longest param name in the same
# row for all columns
max_name_length = max(name_lengths)
max_column_width = 80
# Output a square table if not too big (since it is easier to read)
num_param_names = len(param_names)
square_columns = ceil(num_param_names**0.5)
columns = min(max_column_width // max_name_length, square_columns)
# Compute roughly equal column heights to evenly divide the param names
def split_into_equal_parts(n: int, p: int) -> list[int]:
return [n // p + 1] * (n % p) + [n // p] * (p - n % p)
column_heights = split_into_equal_parts(num_param_names, columns)
# Section the param names into columns and compute their widths
param_names_columns: list[tuple[str, ...]] = []
column_max_widths: list[int] = []
last_end_idx: int = 0
for ch in column_heights:
param_names_columns.append(param_names[last_end_idx : last_end_idx + ch])
column_max_widths.append(
max(len(param_name) for param_name in param_names_columns[-1])
)
last_end_idx = ch + last_end_idx
# Transpose the param name columns into rows to facilitate looping
param_names_rows: list[tuple[str, ...]] = []
for li in zip_longest(*param_names_columns, fillvalue=""):
param_names_rows.append(li)
# Build the table as a string by iterating over and formatting the rows
param_names_table: str = ""
for param_names_row in param_names_rows:
for num, param_name in enumerate(param_names_row):
# Set column width based on the longest param in the column
max_name_length_column = column_max_widths[num]
column_pad = 3
param_names_table += "{:<{}}".format(
param_name, max_name_length_column + column_pad
)
# Insert newlines and spacing after the last element in each row
if num == (len(param_names_row) - 1):
param_names_table += "\n"
return param_names_table
def _format_type_reprs(self, errors: Iterable[ValidationError], /) -> str:
"""
Translate jsonschema types to how they appear in annotations.
Adapts parts of:
- `tools.schemapi.utils.sort_type_reprs`_
- `tools.schemapi.utils.SchemaInfo.to_type_repr`_
.. _tools.schemapi.utils.sort_type_reprs:
https://github.com/vega/altair/blob/48e976ef9388ce08a2e871a0f67ed012b914597a/tools/schemapi/utils.py#L1106-L1146
.. _tools.schemapi.utils.SchemaInfo.to_type_repr:
https://github.com/vega/altair/blob/48e976ef9388ce08a2e871a0f67ed012b914597a/tools/schemapi/utils.py#L449-L543
"""
to_py_types = (
self._JS_TO_PY.get(val, val) for val in _validator_values(errors)
)
it = sorted(to_py_types, key=str.lower)
it = sorted(it, key=len)
it = sorted(it, key=partial(operator.eq, "None"))
return f"of type `{' | '.join(it)}`"
def _get_default_error_message(
self,
errors: ValidationErrorList,
) -> str:
bullet_points: list[str] = []
errors_by_validator = _group_errors_by_validator(errors)
if errs_enum := errors_by_validator.get("enum", None):
bullet_points.extend(
f"one of {val}" for val in _validator_values(errs_enum)
)
if errs_type := errors_by_validator.get("type", None):
bullet_points.append(self._format_type_reprs(errs_type))
# It should not matter which error is specifically used as they are all
# about the same offending instance (i.e. invalid value), so we can just
# take the first one
error = errors[0]
# Add a summary line when parameters are passed an invalid value
# For example: "'asdf' is an invalid value for `stack`
message = f"'{error.instance}' is an invalid value"
if error.absolute_path:
message += f" for `{error.absolute_path[-1]}`"
# Add bullet points
if len(bullet_points) == 0:
message += ".\n\n"
elif len(bullet_points) == 1:
message += f". Valid values are {bullet_points[0]}.\n\n"
else:
# We don't use .capitalize below to make the first letter uppercase
# as that makes the rest of the message lowercase
bullet_points = [point[0].upper() + point[1:] for point in bullet_points]
message += ". Valid values are:\n\n"
message += "\n".join([f"- {point}" for point in bullet_points])
message += "\n\n"
# Add unformatted messages of any remaining errors which were not
# considered so far. This is not expected to be used but more exists
# as a fallback for cases which were not known during development.
it = (
"\n".join(e.message for e in errors)
for validator, errors in errors_by_validator.items()
if validator not in {"enum", "type"}
)
message += "".join(it)
return message
_JSON_VT_co = TypeVar(
"_JSON_VT_co",
Literal["string"],
Literal["object"],
Literal["array"],
covariant=True,
)
"""
One of a subset of JSON Schema `primitive types`_:
["string", "object", "array"]
.. _primitive types:
https://json-schema.org/draft-07/json-schema-validation#rfc.section.6.1.1
"""
_TypeMap = TypeAliasType(
"_TypeMap", Mapping[Literal["type"], _JSON_VT_co], type_params=(_JSON_VT_co,)
)
"""
A single item JSON Schema using the `type`_ keyword.
This may represent **one of**:
{"type": "string"}
{"type": "object"}
{"type": "array"}
.. _type:
https://json-schema.org/understanding-json-schema/reference/type
"""
# NOTE: Type checkers want opposing things:
# - `mypy` : Covariant type variable "_JSON_VT_co" used in protocol where invariant one is expected [misc]
# - `pyright`: Type variable "_JSON_VT_co" used in generic protocol "SchemaLike" should be covariant [reportInvalidTypeVarUse]
# Siding with `pyright` as this is consistent with https://github.com/python/typeshed/blob/9e506eb5e8fc2823db8c60ad561b1145ff114947/stdlib/typing.pyi#L690
@runtime_checkable
| SchemaValidationError |
python | numba__numba | numba/cuda/tests/doc_examples/test_ffi.py | {
"start": 369,
"end": 2654
} | class ____(CUDATestCase):
def test_ex_linking_cu(self):
# magictoken.ex_linking_cu.begin
from numba import cuda
import numpy as np
import os
# Declaration of the foreign function
mul = cuda.declare_device('mul_f32_f32', 'float32(float32, float32)')
# Path to the source containing the foreign function
# (here assumed to be in a subdirectory called "ffi")
basedir = os.path.dirname(os.path.abspath(__file__))
functions_cu = os.path.join(basedir, 'ffi', 'functions.cu')
# Kernel that links in functions.cu and calls mul
@cuda.jit(link=[functions_cu])
def multiply_vectors(r, x, y):
i = cuda.grid(1)
if i < len(r):
r[i] = mul(x[i], y[i])
# Generate random data
N = 32
np.random.seed(1)
x = np.random.rand(N).astype(np.float32)
y = np.random.rand(N).astype(np.float32)
r = np.zeros_like(x)
# Run the kernel
multiply_vectors[1, 32](r, x, y)
# Sanity check - ensure the results match those expected
np.testing.assert_array_equal(r, x * y)
# magictoken.ex_linking_cu.end
def test_ex_from_buffer(self):
from numba import cuda
import os
basedir = os.path.dirname(os.path.abspath(__file__))
functions_cu = os.path.join(basedir, 'ffi', 'functions.cu')
# magictoken.ex_from_buffer_decl.begin
signature = 'float32(CPointer(float32), int32)'
sum_reduce = cuda.declare_device('sum_reduce', signature)
# magictoken.ex_from_buffer_decl.end
# magictoken.ex_from_buffer_kernel.begin
import cffi
ffi = cffi.FFI()
@cuda.jit(link=[functions_cu])
def reduction_caller(result, array):
array_ptr = ffi.from_buffer(array)
result[()] = sum_reduce(array_ptr, len(array))
# magictoken.ex_from_buffer_kernel.end
import numpy as np
x = np.arange(10).astype(np.float32)
r = np.ndarray((), dtype=np.float32)
reduction_caller[1, 1](r, x)
expected = np.sum(x)
actual = r[()]
np.testing.assert_allclose(expected, actual)
if __name__ == '__main__':
unittest.main()
| TestFFI |
python | patrick-kidger__equinox | equinox/nn/_embedding.py | {
"start": 523,
"end": 4012
} | class ____(Module):
"""A simple lookup table that stores embeddings of a fixed size."""
num_embeddings: int = field(static=True)
embedding_size: int = field(static=True)
weight: Array
def __init__(
self,
num_embeddings: int | None = None, # pyright: ignore
embedding_size: int | None = None, # pyright: ignore
weight: Float[Array, "num_embeddings embedding_size"] | None = None,
dtype=None,
*,
key: PRNGKeyArray | None = None,
):
"""**Arguments:**
`Embedding` should be initialised with either:
- `num_embeddings`: Size of embedding dictionary. Must be non-negative.
- `embedding_size`: Size of each embedding vector. Must be non-negative.
- `dtype`: The dtype to use for the embedding weights. Defaults to either
`jax.numpy.float32` or `jax.numpy.float64` depending on whether JAX is in
64-bit mode.
- `key`: A `jax.random.PRNGKey` used to provide randomness for initialisation
of the embedding lookup table. (Keyword only argument.)
Or:
- `weight`: The embedding lookup table, of shape
`(num_embeddings, embedding_size)`.
"""
dtype = default_floating_dtype() if dtype is None else dtype
if weight is None:
if num_embeddings is None or embedding_size is None or key is None:
raise ValueError(
"Must provide `eqx.nn.Embedding(num_embeddings=..., "
"embedding_size=..., key=...)` if not providing the weight "
"directly."
)
if num_embeddings < 0:
raise ValueError("num_embeddings must not be negative.")
if embedding_size < 0:
raise ValueError("embedding_size must not be negative.")
self.weight = jrandom.normal(
key, (num_embeddings, embedding_size), dtype=dtype
)
else:
if weight.ndim != 2:
raise ValueError(
"weight must have shape (num_embeddings, embedding_size)."
)
if num_embeddings is None:
num_embeddings: int = weight.shape[0]
if embedding_size is None:
embedding_size: int = weight.shape[1]
if weight.shape != (num_embeddings, embedding_size):
raise ValueError(
"weight must have shape (num_embeddings, embedding_size)."
)
self.weight = weight
self.num_embeddings = num_embeddings
self.embedding_size = embedding_size
@named_scope("eqx.nn.Embedding")
def __call__(
self, x: Int[ArrayLike, ""], *, key: PRNGKeyArray | None = None
) -> Array:
"""**Arguments:**
- `x`: The table index. Should be a scalar integer array.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(embedding_size,)`, from the x-th index of the embedding
table.
"""
if is_array_like(x) and jnp.shape(x) == ():
return self.weight[x]
else:
raise ValueError(
"`eqx.nn.Embedding()(x)` should be called with a scalar index `x`. "
"Use `jax.vmap` if you would like to index with multiple values."
)
| Embedding |
python | pallets__werkzeug | src/werkzeug/datastructures/mixins.py | {
"start": 6968,
"end": 9039
} | class ____(dict[K, V]):
"""Makes dicts call `self.on_update` on modifications.
.. versionchanged:: 3.1
Implement ``|=`` operator.
.. versionadded:: 0.5
:private:
"""
on_update: cabc.Callable[[te.Self], None] | None = None
def setdefault(self: te.Self, key: K, default: V | None = None) -> V:
modified = key not in self
rv = super().setdefault(key, default) # type: ignore[arg-type]
if modified and self.on_update is not None:
self.on_update(self)
return rv
@t.overload
def pop(self: te.Self, key: K) -> V: ...
@t.overload
def pop(self: te.Self, key: K, default: V) -> V: ...
@t.overload
def pop(self: te.Self, key: K, default: T) -> T: ...
def pop(
self: te.Self,
key: K,
default: V | T = _missing, # type: ignore[assignment]
) -> V | T:
modified = key in self
if default is _missing:
rv: V | T = super().pop(key)
else:
rv = super().pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
@_always_update
def __setitem__(self, key: K, value: V) -> None:
super().__setitem__(key, value)
@_always_update
def __delitem__(self, key: K) -> None:
super().__delitem__(key)
@_always_update
def clear(self) -> None:
super().clear()
@_always_update
def popitem(self) -> tuple[K, V]:
return super().popitem()
@_always_update
def update( # type: ignore[override]
self,
arg: cabc.Mapping[K, V] | cabc.Iterable[tuple[K, V]] | None = None,
/,
**kwargs: V,
) -> None:
if arg is None:
super().update(**kwargs) # type: ignore[call-overload]
else:
super().update(arg, **kwargs)
@_always_update
def __ior__( # type: ignore[override]
self, other: cabc.Mapping[K, V] | cabc.Iterable[tuple[K, V]]
) -> te.Self:
return super().__ior__(other)
| UpdateDictMixin |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 122093,
"end": 122187
} | class ____(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = "DATETIME"
| DATETIME |
python | neetcode-gh__leetcode | python/0459-repeated-substring-pattern.py | {
"start": 0,
"end": 115
} | class ____:
def repeatedSubstringPattern(self, s: str) -> bool:
return s in (s + s)[1:-1]
| Solution |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_team_registry_role.py | {
"start": 274,
"end": 380
} | class ____(GQLResult):
success: bool
UpdateTeamRegistryRole.model_rebuild()
| UpdateTeamRegistryRoleResult |
python | modin-project__modin | modin/config/envvars.py | {
"start": 30438,
"end": 30868
} | class ____(EnvironmentVariable, type=ExactStr):
"""Set ``LogMode`` value if users want to opt-in."""
varname = "MODIN_LOG_MODE"
choices = ("enable", "disable")
default = "disable"
@classmethod
def enable(cls) -> None:
"""Enable all logging levels."""
cls.put("enable")
@classmethod
def disable(cls) -> None:
"""Disable logging feature."""
cls.put("disable")
| LogMode |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 223124,
"end": 224647
} | class ____(Operation):
def __init__(self, axis=None, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.numpy.squeeze(x, axis=self.axis)
def compute_output_spec(self, x):
input_shape = list(x.shape)
sparse = getattr(x, "sparse", False)
axis = to_tuple_or_list(self.axis)
if axis is None:
output_shape = list(filter((1).__ne__, input_shape))
return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse)
else:
for a in axis:
if input_shape[a] != 1:
raise ValueError(
f"Cannot squeeze axis {a}, because the dimension "
"is not 1."
)
axis = [canonicalize_axis(a, len(input_shape)) for a in axis]
for a in sorted(axis, reverse=True):
del input_shape[a]
return KerasTensor(input_shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.squeeze", "keras.ops.numpy.squeeze"])
def squeeze(x, axis=None):
"""Remove axes of length one from `x`.
Args:
x: Input tensor.
axis: Select a subset of the entries of length one in the shape.
Returns:
The input tensor with all or a subset of the dimensions of
length 1 removed.
"""
if any_symbolic_tensors((x,)):
return Squeeze(axis=axis).symbolic_call(x)
return backend.numpy.squeeze(x, axis=axis)
| Squeeze |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/model_summary/model_summary_deepspeed.py | {
"start": 1168,
"end": 1847
} | class ____(LayerSummary):
@property
@override
def num_parameters(self) -> int:
"""Returns the number of parameters in this module."""
return sum(deepspeed_param_size(p) if not _tensor_has_shape(p) else 0 for p in self._module.parameters())
@property
def average_shard_parameters(self) -> int:
"""Returns the number of parameters in this module."""
def partitioned_size(p: Parameter) -> int:
return p.partitioned_size() if RequirementCache("deepspeed<0.6.6") else p.partition_numel()
return sum(partitioned_size(p) if not _tensor_has_shape(p) else 0 for p in self._module.parameters())
| DeepSpeedLayerSummary |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/init_var.py | {
"start": 194,
"end": 356
} | class ____:
i: int
j: int = None
database: InitVar[Path] = None
err: FrozenInstanceError = None
def __post_init__(self, database):
...
| C |
python | getsentry__sentry | src/sentry/models/authidentityreplica.py | {
"start": 385,
"end": 1534
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
auth_identity_id = HybridCloudForeignKey(
"sentry.AuthIdentity", on_delete="CASCADE", unique=True
)
user_id = HybridCloudForeignKey("sentry.User", on_delete="CASCADE")
auth_provider_id = HybridCloudForeignKey("sentry.AuthProvider", on_delete="CASCADE")
ident = models.CharField(max_length=128)
data = models.JSONField(default=dict)
last_verified = models.DateTimeField(default=timezone.now, db_default=Now())
# This represents the time at which this model was created, NOT the date_added of the original auth identity
# we are replicating from.
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_authidentityreplica"
unique_together = (("auth_provider_id", "ident"), ("auth_provider_id", "user_id"))
__repr__ = sane_repr("user_id", "auth_provider_id")
def __str__(self) -> str:
return self.ident
def get_audit_log_data(self) -> dict[str, Any]:
return {"user_id": self.user_id, "data": self.data}
| AuthIdentityReplica |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_zeros.py | {
"start": 1596,
"end": 19786
} | class ____(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] zero matrix.
This operator acts like a [batch] zero matrix `A` with shape
`[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x M` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorZeros` is initialized with `num_rows`, and optionally
`num_columns, `batch_shape`, and `dtype` arguments. If `num_columns` is
`None`, then this operator will be initialized as a square matrix. If
`batch_shape` is `None`, this operator efficiently passes through all
arguments. If `batch_shape` is provided, broadcasting may occur, which will
require making copies.
```python
# Create a 2 x 2 zero matrix.
operator = LinearOperatorZero(num_rows=2, dtype=tf.float32)
operator.to_dense()
==> [[0., 0.]
[0., 0.]]
operator.shape
==> [2, 2]
operator.determinant()
==> 0.
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor, same as x.
# Create a 2-batch of 2x2 zero matrices
operator = LinearOperatorZeros(num_rows=2, batch_shape=[2])
operator.to_dense()
==> [[[0., 0.]
[0., 0.]],
[[0., 0.]
[0., 0.]]]
# Here, even though the operator has a batch shape, the input is the same as
# the output, so x can be passed through without a copy. The operator is able
# to detect that no broadcast is necessary because both x and the operator
# have statically defined shape.
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x)
# Here the operator and x have different batch_shape, and are broadcast.
# This requires a copy, since the output is different size than the input.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, equal to tf.zeros_like([x, x])
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, M], with b >= 0
x.shape = [C1,...,Cc] + [M, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
num_columns=None,
batch_shape=None,
dtype=None,
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorZeros"):
r"""Initialize a `LinearOperatorZeros`.
The `LinearOperatorZeros` is initialized with arguments defining `dtype`
and shape.
This operator is able to broadcast the leading (batch) dimensions, which
sometimes requires copying data. If `batch_shape` is `None`, the operator
can take arguments of any batch shape without copying. See examples.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding zero matrix.
num_columns: Scalar non-negative integer `Tensor`. Number of columns in
the corresponding zero matrix. If `None`, defaults to the value of
`num_rows`.
batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
dimensions. If `None`, this operator has no leading dimensions.
dtype: Data type of the matrix that this operator represents.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
ValueError: If `num_columns` is determined statically to be non-scalar,
or negative.
ValueError: If `batch_shape` is determined statically to not be 1-D, or
negative.
ValueError: If any of the following is not `True`:
`{is_self_adjoint, is_non_singular, is_positive_definite}`.
"""
parameters = dict(
num_rows=num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
assert_proper_shapes=assert_proper_shapes,
name=name
)
dtype = dtype or dtypes.float32
self._assert_proper_shapes = assert_proper_shapes
with ops.name_scope(name):
dtype = dtypes.as_dtype(dtype)
if not is_self_adjoint and is_square:
raise ValueError("A zero operator is always self adjoint.")
if is_non_singular:
raise ValueError("A zero operator is always singular.")
if is_positive_definite:
raise ValueError("A zero operator is always not positive-definite.")
super(LinearOperatorZeros, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
linear_operator_util.assert_not_ref_type(num_rows, "num_rows")
linear_operator_util.assert_not_ref_type(num_columns, "num_columns")
linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape")
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
if num_columns is None:
num_columns = num_rows
self._num_columns = linear_operator_util.shape_tensor(
num_columns, name="num_columns")
self._num_columns_static = tensor_util.constant_value(self._num_columns)
self._check_domain_range_possibly_add_asserts()
if (self._num_rows_static is not None and
self._num_columns_static is not None):
if is_square and self._num_rows_static != self._num_columns_static:
raise ValueError(
"LinearOperatorZeros initialized as is_square=True, but got "
"num_rows({}) != num_columns({})".format(
self._num_rows_static,
self._num_columns_static))
if batch_shape is None:
self._batch_shape_arg = None
else:
self._batch_shape_arg = linear_operator_util.shape_tensor(
batch_shape, name="batch_shape_arg")
self._batch_shape_static = tensor_util.constant_value(
self._batch_shape_arg)
self._check_batch_shape_possibly_add_asserts()
def _shape(self):
    """Static shape of this operator: batch dims + [num_rows, num_columns]."""
    mat_shape = tensor_shape.TensorShape(
        (self._num_rows_static, self._num_columns_static))
    if self._batch_shape_arg is not None:
        return tensor_shape.TensorShape(
            self._batch_shape_static).concatenate(mat_shape)
    return mat_shape
def _shape_tensor(self):
    """Dynamic shape tensor: batch shape (if any) + [num_rows, num_columns]."""
    mat_shape = array_ops_stack.stack(
        (self._num_rows, self._num_columns), axis=0)
    if self._batch_shape_arg is not None:
        return array_ops.concat((self._batch_shape_arg, mat_shape), 0)
    return mat_shape
def _assert_non_singular(self):
    """The zero matrix is always singular, so this check always fails."""
    msg = "Zero operators are always non-invertible."
    raise errors.InvalidArgumentError(node_def=None, op=None, message=msg)
def _assert_positive_definite(self):
    """The zero matrix is never positive-definite; always raise."""
    msg = "Zero operators are always non-positive definite."
    raise errors.InvalidArgumentError(node_def=None, op=None, message=msg)
def _assert_self_adjoint(self):
    # The zero matrix equals its own adjoint, so the check trivially
    # succeeds: return a no-op so callers still get an op to depend on.
    return control_flow_ops.no_op("assert_self_adjoint")
def _possibly_broadcast_batch_shape(self, x):
    """Return 'x', possibly after broadcasting the leading dimensions.

    Args:
        x: `Tensor` whose trailing two dims are the matrix dims; leading
            dims are broadcast against this operator's batch shape.

    Returns:
        `x`, or `x` broadcast up to this operator's batch shape.
    """
    # If we have no batch shape, our batch shape broadcasts with everything!
    if self._batch_shape_arg is None:
        return x
    # Static attempt:
    # If we determine that no broadcast is necessary, pass x through
    # If we need a broadcast, add to an array of zeros.
    #
    # special_shape is the shape that, when broadcast with x's shape, will give
    # the correct broadcast_shape. Note that
    # We have already verified the second to last dimension of self.shape
    # matches x's shape in assert_compatible_matrix_dimensions.
    # Also, the final dimension of 'x' can have any shape.
    # Therefore, the final two dimensions of special_shape are 1's.
    special_shape = self.batch_shape.concatenate([1, 1])
    bshape = array_ops.broadcast_static_shape(x.shape, special_shape)
    if special_shape.is_fully_defined():
        # bshape.is_fully_defined iff special_shape.is_fully_defined.
        if bshape == x.shape:
            return x
        # Use the built in broadcasting of addition.
        zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
        return x + zeros
    # Dynamic broadcast:
    # Always add to an array of zeros, rather than using a "cond", since a
    # cond would require copying data from GPU --> CPU.
    special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
    return x + zeros
def _matmul(self, x, adjoint=False, adjoint_arg=False):
    """Return `0 @ x`: an all-zeros tensor of the matmul result shape."""
    if self._assert_proper_shapes:
        x = linalg.adjoint(x) if adjoint_arg else x
        aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
        # Thread the assertion in as a control dependency so it runs.
        x = control_flow_ops.with_dependencies([aps], x)
    if self.is_square:
        # Note that adjoint has no effect since this matrix is self-adjoint.
        if adjoint_arg:
            # Result shape is x's shape with its last two dims swapped.
            output_shape = array_ops.concat([
                array_ops.shape(x)[:-2],
                [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
        else:
            output_shape = array_ops.shape(x)
        return self._possibly_broadcast_batch_shape(
            array_ops.zeros(shape=output_shape, dtype=x.dtype))
    # Non-square case: result is [..., n, m] where n comes from this
    # operator (rows, or columns if adjoint) and m from x.
    x_shape = array_ops.shape(x)
    n = self._num_columns if adjoint else self._num_rows
    m = x_shape[-2] if adjoint_arg else x_shape[-1]
    output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)
    zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
    return self._possibly_broadcast_batch_shape(zeros)
def _linop_matmul(
    self,
    left_operator: "LinearOperatorZeros",
    right_operator: linear_operator.LinearOperator
) -> linear_operator.LinearOperator:
    """Matmul against a zero operator yields the zero operator itself."""
    if left_operator.is_square and right_operator.is_square:
        return left_operator
    raise ValueError("Matmul with non-square `LinearOperator`s or non-square "
                     "`LinearOperatorZeros` not supported at this time.")
def _determinant(self):
    """det(0) == 0 for every batch member."""
    shape = (self.batch_shape if self.batch_shape.is_fully_defined()
             else self.batch_shape_tensor())
    return array_ops.zeros(shape=shape, dtype=self.dtype)
def _trace(self):
    """trace(0) == 0; one zero per batch member."""
    shape = (self.batch_shape if self.batch_shape.is_fully_defined()
             else self.batch_shape_tensor())
    return array_ops.zeros(shape=shape, dtype=self.dtype)
def _diag_part(self):
    # The diagonal of the zero matrix is all zeros.
    return self._zeros_diag()
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`. Equiv to `0 + mat`.

    Since this operator is all zeros, the sum is just `mat`, possibly
    broadcast up to this operator's batch shape. (The previous docstring
    said "Equiv to `I + mat`", which describes the identity operator.)

    Args:
        mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
        name: A name to give this `Op`.  Accepted for API symmetry;
            unused in this implementation.

    Returns:
        A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    return self._possibly_broadcast_batch_shape(mat)
def _check_domain_range_possibly_add_asserts(self):
    """Static check of init arg `num_rows`, possibly add asserts.

    Validates `num_rows`/`num_columns` statically where possible; when
    `assert_proper_shapes` is set, also attaches runtime rank and
    non-negativity assertions as control dependencies.
    """
    # Possibly add asserts.
    if self._assert_proper_shapes:
        self._num_rows = control_flow_ops.with_dependencies([
            check_ops.assert_rank(
                self._num_rows,
                0,
                message="Argument num_rows must be a 0-D Tensor."),
            check_ops.assert_non_negative(
                self._num_rows,
                message="Argument num_rows must be non-negative."),
        ], self._num_rows)
        self._num_columns = control_flow_ops.with_dependencies([
            check_ops.assert_rank(
                self._num_columns,
                0,
                message="Argument num_columns must be a 0-D Tensor."),
            check_ops.assert_non_negative(
                self._num_columns,
                message="Argument num_columns must be non-negative."),
        ], self._num_columns)
    # Static checks.
    if not self._num_rows.dtype.is_integer:
        raise TypeError("Argument num_rows must be integer type. Found:"
                        " %s" % self._num_rows)
    if not self._num_columns.dtype.is_integer:
        raise TypeError("Argument num_columns must be integer type. Found:"
                        " %s" % self._num_columns)
    num_rows_static = self._num_rows_static
    num_columns_static = self._num_columns_static
    if num_rows_static is not None:
        if num_rows_static.ndim != 0:
            raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                             " %s" % num_rows_static)
        if num_rows_static < 0:
            raise ValueError("Argument num_rows must be non-negative. Found:"
                             " %s" % num_rows_static)
    if num_columns_static is not None:
        if num_columns_static.ndim != 0:
            raise ValueError("Argument num_columns must be a 0-D Tensor. Found:"
                             " %s" % num_columns_static)
        if num_columns_static < 0:
            raise ValueError("Argument num_columns must be non-negative. Found:"
                             " %s" % num_columns_static)
def _check_batch_shape_possibly_add_asserts(self):
    """Static check of init arg `batch_shape`, possibly add asserts.

    No-op when no batch shape was supplied.  Otherwise validates dtype,
    rank-1-ness and non-negativity; runtime assertions are attached only
    when `assert_proper_shapes` is set.
    """
    if self._batch_shape_arg is None:
        return
    # Possibly add asserts
    if self._assert_proper_shapes:
        self._batch_shape_arg = control_flow_ops.with_dependencies([
            check_ops.assert_rank(
                self._batch_shape_arg,
                1,
                message="Argument batch_shape must be a 1-D Tensor."),
            check_ops.assert_non_negative(
                self._batch_shape_arg,
                message="Argument batch_shape must be non-negative."),
        ], self._batch_shape_arg)
    # Static checks
    if not self._batch_shape_arg.dtype.is_integer:
        raise TypeError("Argument batch_shape must be integer type. Found:"
                        " %s" % self._batch_shape_arg)
    if self._batch_shape_static is None:
        return  # Cannot do any other static checks.
    if self._batch_shape_static.ndim != 1:
        raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
                         " %s" % self._batch_shape_static)
    if np.any(self._batch_shape_static < 0):
        raise ValueError("Argument batch_shape must be non-negative. Found:"
                         "%s" % self._batch_shape_static)
def _min_matrix_dim(self):
    """Minimum of domain/range dimension, if statically available, else None."""
    dims = (self.domain_dimension.value, self.range_dimension.value)
    if None in dims:
        return None
    return min(dims)
def _min_matrix_dim_tensor(self):
    """Minimum of domain/range dimension, as a tensor."""
    # min over the trailing [rows, cols] pair of the dynamic shape.
    return math_ops.reduce_min(self.shape_tensor()[-2:])
def _zeros_diag(self):
    """Returns the diagonal of this operator as all zeros."""
    if not self.shape.is_fully_defined():
        # Dynamic path: batch shape + [min(rows, cols)] as a tensor.
        d_shape = array_ops.concat(
            [self.batch_shape_tensor(),
             [self._min_matrix_dim_tensor()]], axis=0)
    else:
        # Static path: concatenate shapes in Python.
        d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
    return array_ops.zeros(shape=d_shape, dtype=self.dtype)
def _eigvals(self):
    # Every eigenvalue of the zero matrix is zero.
    return self._zeros_diag()
@property
def _composite_tensor_prefer_static_fields(self):
    # Constructor args whose static (Python) values should be kept when
    # this operator round-trips through CompositeTensor encoding.
    return ("num_rows", "num_columns", "batch_shape")
@property
def _composite_tensor_fields(self):
    # Constructor args needed to rebuild this operator from a
    # CompositeTensor encoding.
    return ("num_rows", "num_columns", "batch_shape", "dtype",
            "assert_proper_shapes")
def __getitem__(self, slices):
    # Slice the batch shape and return a new LinearOperatorZeros (the
    # original comment said "LinearOperatorIdentity", but this class
    # constructs Zeros below).
    # Use a proxy ones-tensor of the batch shape, slice it, and take the
    # sliced tensor's shape as the new batch shape.
    # NOTE(review): assumes `batch_shape` was supplied at construction;
    # `ones(None)` would fail if `_batch_shape_arg` is None — confirm.
    new_batch_shape = array_ops.shape(
        array_ops.ones(self._batch_shape_arg)[slices])
    parameters = dict(self.parameters, batch_shape=new_batch_shape)
    return LinearOperatorZeros(**parameters)
| LinearOperatorZeros |
python | ray-project__ray | release/benchmarks/distributed/many_nodes_tests/dashboard_test.py | {
"start": 1234,
"end": 2776
} | class ____:
def __init__(self, interval_s: int = 1):
    """Create a tester that polls every dashboard endpoint.

    Args:
        interval_s: Target seconds between consecutive pings, applied to
            each endpoint independently.
    """
    self.dashboard_url = get_address_for_submission_client(None)
    self.interval_s = interval_s
    # Maps endpoint path -> list of observed latencies (seconds).
    self.result = defaultdict(list)
async def run(self):
    """Ping every known endpoint concurrently; never returns normally."""
    await asyncio.gather(*[self.ping(endpoint) for endpoint in endpoints])
async def ping(self, endpoint):
    """Poll `endpoint` forever, recording per-request latency.

    Successful (HTTP 200) request latencies are appended to
    ``self.result[endpoint]``; failures are logged and skipped.  Between
    requests we sleep for the remainder of ``interval_s`` so each
    endpoint is hit roughly once per interval.
    """
    node_id = ray.get_runtime_context().get_node_id()
    while True:
        start = time.monotonic()
        # for logs API, we should append node ID and glob.
        if "/api/v0/logs" in endpoint:
            glob_filter = "*"
            options_dict = {"node_id": node_id, "glob": glob_filter}
            url = (
                f"{self.dashboard_url}{endpoint}?"
                f"{urllib.parse.urlencode(options_dict)}"
            )
        else:
            url = f"{self.dashboard_url}{endpoint}"
        # NOTE(review): requests.get is blocking, so concurrent pings are
        # serialized on the event loop; consider an async client if the
        # per-endpoint cadence must be independent.
        resp = requests.get(url, timeout=30)
        elapsed = time.monotonic() - start
        if resp.status_code == 200:
            # Record the measured request latency.  (Previously this
            # re-measured time after the status check, inflating it.)
            self.result[endpoint].append(elapsed)
        else:
            try:
                resp.raise_for_status()
            except Exception as e:
                logger.exception(e)
        # Sleep only for the time left in the interval.  The original
        # `max(0, self.interval_s, elapsed)` slept max(interval, elapsed),
        # stretching the cadence after slow requests; the `max(0, ...)`
        # guard indicates subtraction was intended.
        await asyncio.sleep(max(0, self.interval_s - elapsed))
def get_result(self):
    # endpoint -> list of latencies (seconds) recorded for successful pings.
    return self.result
| DashboardTester |
python | ray-project__ray | ci/raydepsets/tests/test_cli.py | {
"start": 1556,
"end": 37270
} | class ____(unittest.TestCase):
def test_cli_load_fail_no_config(self):
    """Building with a nonexistent config path fails with FileNotFoundError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        result = _invoke_build(tmpdir, "fake_path/test.depsets.yaml")
        assert result.exit_code == 1
        assert isinstance(result.exception, FileNotFoundError)
        assert "No such file or directory" in str(result.exception)
def test_dependency_set_manager_init(self):
    """Manager construction populates workspace, config, and build graph."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        mgr = _create_test_manager(tmpdir)
        assert mgr is not None
        assert mgr.workspace.dir == tmpdir
        assert len(mgr.config.depsets) > 0
        assert len(mgr.build_graph.nodes) > 0
def test_uv_binary_exists(self):
    """A uv binary path must be resolvable."""
    binary = _uv_binary()
    assert binary is not None
def test_uv_version(self):
    """The bundled uv binary reports the pinned version with no stderr."""
    proc = subprocess.run(
        [_uv_binary(), "--version"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_text = proc.stdout.decode("utf-8")
    stderr_text = proc.stderr.decode("utf-8")
    assert proc.returncode == 0
    assert "uv 0.8.17" in stdout_text
    assert stderr_text == ""
def test_compile(self):
    """compile() produces a lock file identical to the checked-in fixture."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        save_file_as(
            Path(tmpdir) / "requirements_compiled_test.txt",
            Path(tmpdir) / "requirements_compiled.txt",
        )
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="ray_base_test_depset",
            output="requirements_compiled.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
def test_compile_update_package(self):
    """Bumping a pinned constraint re-resolves to the updated lock fixture."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        compiled_file = Path(
            _runfiles.Rlocation(f"{tmpdir}/requirement_constraints_test.txt")
        )
        # Simulate a version bump in the constraints file.
        replace_in_file(compiled_file, "emoji==2.9.0", "emoji==2.10.0")
        output_file = Path(
            _runfiles.Rlocation(f"{tmpdir}/requirements_compiled.txt")
        )
        save_file_as(compiled_file, output_file)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="ray_base_test_depset",
            output="requirements_compiled.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test_update.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
@patch("sys.stdout", new_callable=io.StringIO)
def test_compile_with_append_and_override_flags(self, mock_stdout):
    """Appended flags are added and override flags replace defaults in the uv call."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate", "--python-version 3.10"],
            override_flags=[
                "--extra-index-url https://download.pytorch.org/whl/cu124"
            ],
            name="ray_base_test_depset",
            output="requirements_compiled.txt",
        )
        stdout = mock_stdout.getvalue()
        assert "--python-version 3.10" in stdout
        assert "--extra-index-url https://download.pytorch.org/whl/cu124" in stdout
        # The default cu128 index must have been replaced, not appended to.
        assert (
            "--extra-index-url https://download.pytorch.org/whl/cu128" not in stdout
        )
def test_compile_by_depset_name(self):
    """CLI build of a single named depset writes its output and reports success."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir, ignore_patterns="test2.depsets.yaml")
        result = _invoke_build(tmpdir, "test.depsets.yaml", "ray_base_test_depset")
        output_fp = Path(tmpdir) / "requirements_compiled.txt"
        assert output_fp.is_file()
        assert result.exit_code == 0
        assert (
            "Dependency set ray_base_test_depset compiled successfully"
            in result.output
        )
def test_subset(self):
    """subset() of a compiled superset reproduces the plain compile fixture."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        # Add six to requirements_test_subset.txt
        save_packages_to_file(
            Path(tmpdir) / "requirements_test_subset.txt",
            ["six==1.16.0"],
        )
        manager = _create_test_manager(tmpdir)
        # Compile general_depset with requirements_test.txt and requirements_test_subset.txt
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt", "requirements_test_subset.txt"],
            append_flags=["--no-annotate"],
            name="general_depset__py311_cpu",
            output="requirements_compiled_general.txt",
        )
        # Subset general_depset with requirements_test.txt (should lock emoji & pyperclip)
        manager.subset(
            source_depset="general_depset__py311_cpu",
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="subset_general_depset__py311_cpu",
            output="requirements_compiled_subset_general.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled_subset_general.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
def test_subset_does_not_exist(self):
    """subset() raises when a requirement file is not part of the source depset."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        # Add six to requirements_test_subset.txt
        save_packages_to_file(
            Path(tmpdir) / "requirements_test_subset.txt",
            ["six==1.16.0"],
        )
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt", "requirements_test_subset.txt"],
            append_flags=["--no-annotate"],
            name="general_depset__py311_cpu",
            output="requirements_compiled_general.txt",
        )
        with self.assertRaises(RuntimeError) as e:
            manager.subset(
                source_depset="general_depset__py311_cpu",
                requirements=["requirements_compiled_test.txt"],
                append_flags=["--no-annotate"],
                name="subset_general_depset__py311_cpu",
                output="requirements_compiled_subset_general.txt",
            )
        assert (
            "Requirement requirements_compiled_test.txt is not a subset of general_depset__py311_cpu in config test.depsets.yaml"
            in str(e.exception)
        )
def test_subset_with_expanded_depset(self):
    """A subset may use an expand depset as its (transitive) source.

    Renamed from the accidentally doubled
    ``test_subset_with_expanded_depsettest_subset_with_expanded_depset``
    (a copy/paste artifact); behavior is unchanged.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        compile_depset = Depset(
            name="compile_depset",
            operation="compile",
            requirements=["requirements_test.txt"],
            output="requirements_compiled.txt",
            config_name="test.depsets.yaml",
        )
        expand_depset = Depset(
            name="expand_depset",
            operation="expand",
            depsets=["compile_depset"],
            requirements=["requirements_compiled_test_expand.txt"],
            output="requirements_compiled_expanded.txt",
            config_name="test.depsets.yaml",
        )
        nested_expand_subset = Depset(
            name="nested_expand_subset_depset",
            operation="subset",
            source_depset="expand_depset",
            requirements=["requirements_test.txt"],
            output="requirements_compiled_subset_nested_expand.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(
            tmpdir,
            [compile_depset, expand_depset, nested_expand_subset],
            "test.depsets.yaml",
        )
        manager = _create_test_manager(tmpdir, build_all_configs=True)
        # Must not raise: requirements_test.txt is reachable through the
        # expand depset's compile source.
        manager.check_subset_exists(expand_depset, ["requirements_test.txt"])
def test_check_if_subset_exists(self):
    """check_subset_exists raises for a requirement missing from the source depset."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        source_depset = Depset(
            name="general_depset__py311_cpu",
            operation="compile",
            requirements=["requirements_1.txt", "requirements_2.txt"],
            constraints=["requirement_constraints_1.txt"],
            output="requirements_compiled_general.txt",
            append_flags=[],
            override_flags=[],
            config_name="test.depsets.yaml",
        )
        with self.assertRaises(RuntimeError) as e:
            manager.check_subset_exists(
                source_depset=source_depset,
                requirements=["requirements_3.txt"],
            )
        assert (
            "Requirement requirements_3.txt is not a subset of general_depset__py311_cpu in config test.depsets.yaml"
            in str(e.exception)
        )
def test_compile_bad_requirements(self):
    """compile() surfaces uv's missing-file error as a RuntimeError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        with self.assertRaises(RuntimeError) as e:
            manager.compile(
                constraints=[],
                requirements=["requirements_test_bad.txt"],
                name="general_depset",
                output="requirements_compiled_general.txt",
            )
        assert "File not found: `requirements_test_bad.txt" in str(e.exception)
def test_get_path(self):
    """get_path resolves a relative file name against the workspace dir."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        expected = Path(tmpdir) / "requirements_test.txt"
        assert manager.get_path("requirements_test.txt") == expected
@patch("sys.stdout", new_callable=io.StringIO)
def test_append_uv_flags_exist_in_output(self, mock_stdout):
    """An '=' style appended flag appears verbatim in the uv invocation output."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=[],
            requirements=["requirements_test.txt"],
            name="general_depset",
            output="requirements_compiled_general.txt",
            append_flags=["--python-version=3.10"],
        )
        stdout = mock_stdout.getvalue()
        assert "--python-version=3.10" in stdout
@patch("sys.stdout", new_callable=io.StringIO)
def test_append_uv_flags_with_space_in_flag(self, mock_stdout):
    """A space-separated appended flag also appears in the uv invocation output."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=[],
            requirements=["requirements_test.txt"],
            name="general_depset",
            output="requirements_compiled_general.txt",
            append_flags=["--python-version 3.10"],
        )
        stdout = mock_stdout.getvalue()
        assert "--python-version 3.10" in stdout
def test_include_setuptools(self):
    """include_setuptools=True omits the --unsafe-package setuptools exclusion."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=[],
            requirements=["requirements_test.txt"],
            name="general_depset",
            output="requirements_compiled_general.txt",
            include_setuptools=True,
        )
        output_file = Path(tmpdir) / "requirements_compiled_general.txt"
        output_text = output_file.read_text()
        assert "--unsafe-package setuptools" not in output_text
@patch("sys.stdout", new_callable=io.StringIO)
def test_ignore_setuptools(self, mock_stdout):
    """include_setuptools=False adds --unsafe-package setuptools to the uv call."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=[],
            requirements=["requirements_test.txt"],
            name="general_depset",
            output="requirements_compiled_general.txt",
            include_setuptools=False,
        )
        stdout = mock_stdout.getvalue()
        assert "--unsafe-package setuptools" in stdout
def test_override_uv_flag_single_flag(self):
    """Overriding replaces the default --index-strategy value in place."""
    expected_flags = DEFAULT_UV_FLAGS.copy()
    expected_flags.remove("--index-strategy")
    expected_flags.remove("unsafe-best-match")
    expected_flags.extend(["--index-strategy", "first-index"])
    actual = _override_uv_flags(
        ["--index-strategy first-index"],
        DEFAULT_UV_FLAGS.copy(),
    )
    assert actual == expected_flags
def test_override_uv_flag_multiple_flags(self):
    """Override behavior of _override_uv_flags.

    NOTE(review): despite the name, this body is byte-identical to
    test_override_uv_flag_single_flag and only overrides one flag —
    presumably it was meant to pass several override strings; confirm
    intent before extending.
    """
    expected_flags = DEFAULT_UV_FLAGS.copy()
    expected_flags.remove("--index-strategy")
    expected_flags.remove("unsafe-best-match")
    expected_flags.extend(["--index-strategy", "first-index"])
    assert (
        _override_uv_flags(
            ["--index-strategy first-index"],
            DEFAULT_UV_FLAGS.copy(),
        )
        == expected_flags
    )
def test_flatten_flags(self):
    """_flatten_flags splits space-separated flag strings into tokens."""
    simple = ["--no-annotate", "--no-header"]
    assert _flatten_flags(simple) == ["--no-annotate", "--no-header"]
    with_value = simple + [
        "--extra-index-url https://download.pytorch.org/whl/cu128"
    ]
    expected = [
        "--no-annotate",
        "--no-header",
        "--extra-index-url",
        "https://download.pytorch.org/whl/cu128",
    ]
    assert _flatten_flags(with_value) == expected
def test_build_graph(self):
    """The build graph has the expected node/edge counts and topological order."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        assert manager.build_graph is not None
        assert len(manager.build_graph.nodes()) == 7
        assert len(manager.build_graph.edges()) == 4
        # assert that the compile depsets are first
        assert (
            manager.build_graph.nodes["general_depset__py311_cpu"]["operation"]
            == "compile"
        )
        assert (
            manager.build_graph.nodes["subset_general_depset"]["operation"]
            == "subset"
        )
        assert (
            manager.build_graph.nodes["expand_general_depset__py311_cpu"][
                "operation"
            ]
            == "expand"
        )
        sorted_nodes = list(topological_sort(manager.build_graph))
        # assert that the root nodes are the compile depsets
        first_nodes = sorted_nodes[:4]
        assert all(
            manager.build_graph.nodes[node]["operation"] == "compile"
            or manager.build_graph.nodes[node]["operation"] == "pre_hook"
            for node in first_nodes
        )
def test_build_graph_predecessors(self):
    """An expand node's predecessors are exactly its source compile depsets."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        assert manager.build_graph is not None
        assert (
            manager.build_graph.nodes["general_depset__py311_cpu"]["operation"]
            == "compile"
        )
        assert (
            manager.build_graph.nodes["expanded_depset__py311_cpu"]["operation"]
            == "compile"
        )
        assert (
            manager.build_graph.nodes["expand_general_depset__py311_cpu"][
                "operation"
            ]
            == "expand"
        )
        assert set(
            manager.build_graph.predecessors("expand_general_depset__py311_cpu")
        ) == {"general_depset__py311_cpu", "expanded_depset__py311_cpu"}
def test_build_graph_bad_operation(self):
    """Building the graph rejects depsets with an unknown operation."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir, ignore_patterns="test2.depsets.yaml")
        depset = Depset(
            name="invalid_op_depset",
            operation="invalid_op",
            requirements=["requirements_test.txt"],
            output="requirements_compiled_invalid_op.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(tmpdir, [depset], "test.depsets.yaml")
        with self.assertRaises(ValueError) as e:
            _create_test_manager(tmpdir)
        assert (
            "Invalid operation: invalid_op for depset invalid_op_depset in config test.depsets.yaml"
            in str(e.exception)
        )
def test_execute(self):
    """Executing the whole build graph completes without raising.

    The original body only prepared the fixture directory and asserted
    nothing; run the manager over the full graph so the test has teeth.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        # Executes every node (compile/subset/expand) in topological order;
        # any failure raises and fails the test.
        manager.execute()
def test_execute_single_depset(self):
    """Executing one named depset prunes the graph down to that single node."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        manager.execute(single_depset_name="general_depset__py311_cpu")
        assert (
            manager.build_graph.nodes["general_depset__py311_cpu"]["operation"]
            == "compile"
        )
        assert len(manager.build_graph.nodes()) == 1
def test_execute_single_depset_that_does_not_exist(self):
    """Requesting an unknown depset name raises a KeyError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = _create_test_manager(tmpdir)
        with self.assertRaises(KeyError) as ctx:
            manager.execute(single_depset_name="fake_depset")
        assert "Dependency set fake_depset not found" in str(ctx.exception)
def test_expand(self):
    """expand() merges two compiled depsets under new constraints into the fixture."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        save_packages_to_file(
            Path(tmpdir) / "requirements_expanded.txt",
            ["six"],
        )
        save_file_as(
            Path(tmpdir) / "requirement_constraints_test.txt",
            Path(tmpdir) / "requirement_constraints_expand.txt",
        )
        # Pin six in the expand constraints so the merge is deterministic.
        append_to_file(
            Path(tmpdir) / "requirement_constraints_expand.txt",
            "six==1.17.0",
        )
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="general_depset__py311_cpu",
            output="requirements_compiled_general.txt",
        )
        manager.compile(
            constraints=[],
            requirements=["requirements_expanded.txt"],
            append_flags=["--no-annotate"],
            name="expanded_depset__py311_cpu",
            output="requirements_compiled_expanded.txt",
        )
        manager.expand(
            depsets=["general_depset__py311_cpu", "expanded_depset__py311_cpu"],
            constraints=["requirement_constraints_expand.txt"],
            append_flags=["--no-annotate"],
            requirements=[],
            name="expand_general_depset__py311_cpu",
            output="requirements_compiled_expand_general.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled_expand_general.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test_expand.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
def test_expand_with_requirements(self):
    """expand() accepts extra requirements files alongside source depsets."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        save_packages_to_file(
            Path(tmpdir) / "requirements_expanded.txt",
            ["six"],
        )
        save_file_as(
            Path(tmpdir) / "requirement_constraints_test.txt",
            Path(tmpdir) / "requirement_constraints_expand.txt",
        )
        # Pin six so the expanded resolution matches the fixture.
        append_to_file(
            Path(tmpdir) / "requirement_constraints_expand.txt",
            "six==1.17.0",
        )
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="general_depset__py311_cpu",
            output="requirements_compiled_general.txt",
        )
        manager.expand(
            depsets=["general_depset__py311_cpu"],
            requirements=["requirements_expanded.txt"],
            constraints=["requirement_constraints_expand.txt"],
            append_flags=["--no-annotate"],
            name="expand_general_depset__py311_cpu",
            output="requirements_compiled_expand_general.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled_expand_general.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test_expand.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
def test_get_depset_with_build_arg_set(self):
    """_get_depset finds a depset whose name embeds expanded build args."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = DependencySetManager(
            config_path="test.depsets.yaml",
            workspace_dir=tmpdir,
        )
        found = _get_depset(
            manager.config.depsets, "build_args_test_depset__py311_cpu"
        )
        assert found.name == "build_args_test_depset__py311_cpu"
def test_get_depset_without_build_arg_set(self):
    """_get_depset finds a plainly named depset (no build-arg suffix)."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        manager = DependencySetManager(
            config_path="test.depsets.yaml",
            workspace_dir=tmpdir,
        )
        found = _get_depset(manager.config.depsets, "ray_base_test_depset")
        assert found.name == "ray_base_test_depset"
def test_execute_single_pre_hook(self):
    """A depset's pre-hook script runs and its output surfaces in CLI output."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        result = _invoke_build(tmpdir, "test2.depsets.yaml", "pre_hook_test_depset")
        assert (Path(tmpdir) / "test.depsets.yaml").exists()
        assert result.exit_code == 0
        assert "Pre-hook test" in result.output
        assert "Executed pre_hook pre-hook-test.sh successfully" in result.output
def test_execute_single_invalid_pre_hook(self):
    """A failing pre-hook script aborts the build with a RuntimeError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        result = _invoke_build(
            tmpdir, "test2.depsets.yaml", "pre_hook_invalid_test_depset"
        )
        assert result.exit_code == 1
        assert isinstance(result.exception, RuntimeError)
        assert (
            "Failed to execute pre_hook pre-hook-error-test.sh with error:"
            in str(result.exception)
        )
def test_copy_lock_files_to_temp_dir(self):
    """In check mode, compiled lock files exist in both workspace and temp dir."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir, ignore_patterns="test2.depsets.yaml")
        depset = Depset(
            name="check_depset",
            operation="compile",
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            output="requirements_compiled_test.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(tmpdir, [depset], "test.depsets.yaml")
        save_file_as(
            Path(tmpdir) / "requirements_compiled_test.txt",
            Path(tmpdir) / "requirements_compiled.txt",
        )
        manager = _create_test_manager(tmpdir, check=True)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="check_depset",
            output="requirements_compiled_test.txt",
        )
        assert (
            Path(manager.workspace.dir) / "requirements_compiled_test.txt"
        ).exists()
        assert (Path(manager.temp_dir) / "requirements_compiled_test.txt").exists()
def test_diff_lock_files_out_of_date(self):
    """diff_lock_files reports a unified diff when the workspace lock drifts."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir, ignore_patterns="test2.depsets.yaml")
        depset = Depset(
            name="check_depset",
            operation="compile",
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            output="requirements_compiled_test.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(tmpdir, [depset], "test.depsets.yaml")
        manager = _create_test_manager(tmpdir, check=True)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="check_depset",
            output="requirements_compiled_test.txt",
        )
        # Corrupt the workspace copy so it no longer matches the fresh compile.
        replace_in_file(
            Path(manager.workspace.dir) / "requirements_compiled_test.txt",
            "emoji==2.9.0",
            "emoji==2.8.0",
        )
        with self.assertRaises(RuntimeError) as e:
            manager.diff_lock_files()
        assert (
            "Lock files are not up to date for config: test.depsets.yaml. Please update lock files and push the changes."
            in str(e.exception)
        )
        assert "+emoji==2.8.0" in str(e.exception)
        assert "-emoji==2.9.0" in str(e.exception)
def test_diff_lock_files_up_to_date(self):
    """diff_lock_files passes silently when workspace locks match the compile."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir, ignore_patterns="test2.depsets.yaml")
        depset = Depset(
            name="check_depset",
            operation="compile",
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            output="requirements_compiled_test.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(tmpdir, [depset], "test.depsets.yaml")
        manager = _create_test_manager(tmpdir, check=True)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            requirements=["requirements_test.txt"],
            append_flags=["--no-annotate"],
            name="check_depset",
            output="requirements_compiled_test.txt",
        )
        # Must not raise: nothing was modified between compile and diff.
        manager.diff_lock_files()
def test_compile_with_packages(self):
    """compile() accepts inline package pins instead of a requirements file."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        save_file_as(
            Path(tmpdir) / "requirements_compiled_test.txt",
            Path(tmpdir) / "requirements_compiled_test_packages.txt",
        )
        manager = _create_test_manager(tmpdir)
        manager.compile(
            constraints=["requirement_constraints_test.txt"],
            packages=["emoji==2.9.0", "pyperclip==1.6.0"],
            append_flags=["--no-annotate"],
            name="packages_test_depset",
            output="requirements_compiled_test_packages.txt",
        )
        output_file = Path(tmpdir) / "requirements_compiled_test_packages.txt"
        output_text = output_file.read_text()
        output_file_valid = Path(tmpdir) / "requirements_compiled_test.txt"
        output_text_valid = output_file_valid.read_text()
        assert output_text == output_text_valid
def test_compile_with_packages_and_requirements(self):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
save_file_as(
Path(tmpdir) / "requirements_compiled_test.txt",
Path(tmpdir) / "requirements_compiled_test_packages.txt",
)
manager = _create_test_manager(tmpdir)
manager.compile(
constraints=["requirement_constraints_test.txt"],
packages=["emoji==2.9.0", "pyperclip==1.6.0"],
requirements=["requirements_test.txt"],
append_flags=["--no-annotate"],
name="packages_test_depset",
output="requirements_compiled_test_packages.txt",
)
output_file = Path(tmpdir) / "requirements_compiled_test_packages.txt"
output_text = output_file.read_text()
output_file_valid = Path(tmpdir) / "requirements_compiled_test.txt"
output_text_valid = output_file_valid.read_text()
assert output_text == output_text_valid
@patch("sys.stdout", new_callable=io.StringIO)
def test_requirements_ordering(self, mock_stdout):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
save_packages_to_file(
Path(tmpdir) / "requirements_expanded.txt",
["six"],
)
save_packages_to_file(
Path(tmpdir) / "requirements_compiled_test_expand.txt",
["zipp"],
)
manager = _create_test_manager(tmpdir)
manager.compile(
constraints=["requirement_constraints_test.txt"],
requirements=[
"requirements_test.txt",
"requirements_expanded.txt",
"requirements_compiled_test_expand.txt",
],
append_flags=["--no-annotate"],
name="requirements_ordering_test_depset",
output="requirements_compiled_requirements_ordering.txt",
)
stdout = mock_stdout.getvalue()
assert (
"requirements_compiled_test_expand.txt requirements_expanded.txt requirements_test.txt"
in stdout
)
@patch("sys.stdout", new_callable=io.StringIO)
def test_constraints_ordering(self, mock_stdout):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
save_packages_to_file(
Path(tmpdir) / "requirements_expanded.txt",
["six==1.17.0"],
)
save_packages_to_file(
Path(tmpdir) / "requirements_compiled_test_expand.txt",
["zipp==3.19.2"],
)
manager = _create_test_manager(tmpdir)
manager.compile(
requirements=["requirements_test.txt"],
constraints=[
"requirement_constraints_test.txt",
"requirements_expanded.txt",
"requirements_compiled_test_expand.txt",
],
append_flags=["--no-annotate"],
name="constraints_ordering_test_depset",
output="requirements_compiled_constraints_ordering.txt",
)
stdout = mock_stdout.getvalue()
assert (
"-c requirement_constraints_test.txt -c requirements_compiled_test_expand.txt -c requirements_expanded.txt"
in stdout
)
@patch("sys.stdout", new_callable=io.StringIO)
def test_execute_pre_hook(self, mock_stdout):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
manager = _create_test_manager(tmpdir)
manager.execute_pre_hook("pre-hook-test.sh test")
stdout = mock_stdout.getvalue()
assert "Pre-hook test\n" in stdout
assert "Executed pre_hook pre-hook-test.sh test successfully" in stdout
def test_get_expanded_depset_requirements(self):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
manager = _create_test_manager(tmpdir)
requirements = manager.get_expanded_depset_requirements(
"general_depset__py311_cpu", []
)
assert requirements == ["requirements_test.txt"]
requirements = manager.get_expanded_depset_requirements(
"expand_general_depset__py311_cpu", []
)
assert sorted(requirements) == sorted(
[
"requirements_test.txt",
"requirements_expanded.txt",
]
)
requirements = manager.get_expanded_depset_requirements(
"nested_expand_depset__py311_cpu", []
)
assert sorted(requirements) == sorted(
[
"requirements_compiled_test_expand.txt",
"requirements_expanded.txt",
"requirements_test.txt",
]
)
def test_build_all_configs(self):
with tempfile.TemporaryDirectory() as tmpdir:
copy_data_to_tmpdir(tmpdir)
manager = _create_test_manager(
tmpdir, config_path="*.depsets.yaml", build_all_configs=True
)
assert manager.build_graph is not None
assert len(manager.build_graph.nodes) == 12
assert len(manager.build_graph.edges) == 8
if __name__ == "__main__":
    # Propagate pytest's exit code to the shell when run as a script.
    raise SystemExit(pytest.main(["-vvv", __file__]))
| TestCli |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.