language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/logs.py | {
"start": 1584,
"end": 9010
} | class ____(AwsBaseHook):
"""
Interact with Amazon CloudWatch Logs.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("logs") <CloudWatchLogs.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "logs"
super().__init__(*args, **kwargs)
class ContinuationToken:
"""Just a wrapper around a str token to allow updating it from the caller."""
def __init__(self):
self.value: str | None = None
def get_log_events(
self,
log_group: str,
log_stream_name: str,
start_time: int = 0,
skip: int = 0,
start_from_head: bool | None = None,
continuation_token: ContinuationToken | None = None,
end_time: int | None = None,
) -> Generator:
"""
Return a generator for log items in a single stream; yields all items available at the current moment.
.. seealso::
- :external+boto3:py:meth:`CloudWatchLogs.Client.get_log_events`
:param log_group: The name of the log group.
:param log_stream_name: The name of the specific stream.
:param start_time: The timestamp value in ms to start reading the logs from (default: 0).
:param skip: The number of log entries to skip at the start (default: 0).
This is for when there are multiple entries at the same timestamp.
:param continuation_token: a token indicating where to read logs from.
Will be updated as this method reads new logs, to be reused in subsequent calls.
:param end_time: The timestamp value in ms to stop reading the logs from (default: None).
If None is provided, reads it until the end of the log stream
:return: | A CloudWatch log event with the following key-value pairs:
| 'timestamp' (int): The time in milliseconds of the event.
| 'message' (str): The log event data.
| 'ingestionTime' (int): The time in milliseconds the event was ingested.
"""
if continuation_token is None:
continuation_token = AwsLogsHook.ContinuationToken()
num_consecutive_empty_response = 0
while True:
if continuation_token.value is not None:
token_arg: dict[str, str] = {"nextToken": continuation_token.value}
else:
token_arg = {}
response = self.conn.get_log_events(
**prune_dict(
{
"logGroupName": log_group,
"logStreamName": log_stream_name,
"startTime": start_time,
"endTime": end_time,
"startFromHead": True,
**token_arg,
}
)
)
events = response["events"]
event_count = len(events)
if event_count > skip:
events = events[skip:]
skip = 0
else:
skip -= event_count
events = []
yield from events
if continuation_token.value == response["nextForwardToken"]:
return
if not event_count:
num_consecutive_empty_response += 1
if num_consecutive_empty_response >= NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD:
# Exit if there are more than NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD consecutive
# empty responses
return
else:
num_consecutive_empty_response = 0
continuation_token.value = response["nextForwardToken"]
async def describe_log_streams_async(
self, log_group: str, stream_prefix: str, order_by: str, count: int
) -> dict[str, Any] | None:
"""
Async function to get the list of log streams for the specified log group.
You can list all the log streams or filter the results by prefix. You can also control
how the results are ordered.
:param log_group: The name of the log group.
:param stream_prefix: The prefix to match.
:param order_by: If the value is LogStreamName , the results are ordered by log stream name.
If the value is LastEventTime , the results are ordered by the event time. The default value is LogStreamName.
:param count: The maximum number of items returned
"""
async with await self.get_async_conn() as client:
try:
response: dict[str, Any] = await client.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=stream_prefix,
orderBy=order_by,
limit=count,
)
return response
except ClientError as error:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
if error.response["Error"]["Code"] == "ResourceNotFoundException":
return None
raise error
async def get_log_events_async(
self,
log_group: str,
log_stream_name: str,
start_time: int = 0,
skip: int = 0,
start_from_head: bool = True,
) -> AsyncGenerator[Any, dict[str, Any]]:
"""
Yield all the available items in a single log stream.
:param log_group: The name of the log group.
:param log_stream_name: The name of the specific stream.
:param start_time: The time stamp value to start reading the logs from (default: 0).
:param skip: The number of log entries to skip at the start (default: 0).
This is for when there are multiple entries at the same timestamp.
:param start_from_head: whether to start from the beginning (True) of the log or
at the end of the log (False).
"""
next_token = None
while True:
if next_token is not None:
token_arg: dict[str, str] = {"nextToken": next_token}
else:
token_arg = {}
async with await self.get_async_conn() as client:
response = await client.get_log_events(
logGroupName=log_group,
logStreamName=log_stream_name,
startTime=start_time,
startFromHead=start_from_head,
**token_arg,
)
events = response["events"]
event_count = len(events)
if event_count > skip:
events = events[skip:]
skip = 0
else:
skip -= event_count
events = []
for event in events:
await asyncio.sleep(1)
yield event
if next_token != response["nextForwardToken"]:
next_token = response["nextForwardToken"]
| AwsLogsHook |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_model_config.py | {
"start": 487,
"end": 5259
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
# This mock class returns the internal inference configuration keys and values available
return context.model_config.items()
def test_save_with_model_config(model_path, model_config):
model = InferenceContextModel()
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert loaded_model.model_config
assert set(model_config.keys()) == set(loaded_model.model_config)
assert all(loaded_model.model_config[k] == v for k, v in model_config.items())
assert all(loaded_model.model_config[k] == v for k, v in loaded_model.predict([[0]]))
@pytest.mark.parametrize(
"model_config_path",
[
os.path.abspath("tests/pyfunc/sample_code/config.yml"),
"tests/pyfunc/../pyfunc/sample_code/config.yml",
],
)
def test_save_with_model_config_path(model_path, model_config, model_config_path):
model = InferenceContextModel()
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config_path)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert loaded_model.model_config
assert set(model_config.keys()) == set(loaded_model.model_config)
assert all(loaded_model.model_config[k] == v for k, v in model_config.items())
assert all(loaded_model.model_config[k] == v for k, v in loaded_model.predict([[0]]))
def test_override_model_config(model_path, model_config):
model = TestModel()
inference_override = {"timeout": 400}
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path, model_config=inference_override)
assert loaded_model.model_config["timeout"] == 400
assert all(loaded_model.model_config[k] == v for k, v in inference_override.items())
@pytest.mark.parametrize(
"model_config_path",
[
os.path.abspath("tests/pyfunc/sample_code/config.yml"),
"tests/pyfunc/../pyfunc/sample_code/config.yml",
],
)
def test_override_model_config_path(tmp_path, model_path, model_config_path):
model = TestModel()
inference_override = {"timeout": 400}
config_path = tmp_path / "config.yml"
config_path.write_text(yaml.dump(inference_override))
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config_path)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path, model_config=str(config_path))
assert loaded_model.model_config["timeout"] == 400
assert all(loaded_model.model_config[k] == v for k, v in inference_override.items())
def test_override_model_config_ignore_invalid(model_path, model_config):
model = TestModel()
inference_override = {"invalid_key": 400}
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path, model_config=inference_override)
assert loaded_model.predict([[5]])
assert all(k not in loaded_model.model_config for k in inference_override.keys())
@pytest.mark.parametrize(
"model_config_path",
[
os.path.abspath("tests/pyfunc/sample_code/config.yml"),
"tests/pyfunc/../pyfunc/sample_code/config.yml",
],
)
def test_override_model_config_path_ignore_invalid(tmp_path, model_path, model_config_path):
model = TestModel()
inference_override = {"invalid_key": 400}
config_path = tmp_path / "config.yml"
config_path.write_text(yaml.dump(inference_override))
mlflow.pyfunc.save_model(model_path, python_model=model, model_config=model_config_path)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path, model_config=str(config_path))
assert loaded_model.predict([[5]])
assert all(k not in loaded_model.model_config for k in inference_override.keys())
def test_pyfunc_without_model_config(model_path, model_config):
model = TestModel()
mlflow.pyfunc.save_model(model_path, python_model=model)
loaded_model = mlflow.pyfunc.load_model(model_uri=model_path, model_config=model_config)
assert loaded_model.predict([[5]])
assert not loaded_model.model_config
def test_pyfunc_loader_without_model_config(model_path):
mlflow.pyfunc.save_model(
path=model_path,
data_path=".",
loader_module=__name__,
code_paths=[__file__],
mlflow_model=Model(run_id="test", artifact_path="testtest"),
)
inference_override = {"invalid_key": 400}
pyfunc_model = mlflow.pyfunc.load_model(model_path, model_config=inference_override)
assert not pyfunc_model.model_config
| InferenceContextModel |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 977606,
"end": 978333
} | class ____(ValueChannelMixin, core.PositionValueDef):
"""
XValue schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| XValue |
python | zarr-developers__zarr-python | tests/test_metadata/test_consolidated.py | {
"start": 1710,
"end": 31582
} | class ____:
async def test_open_consolidated_false_raises(self) -> None:
store = zarr.storage.MemoryStore()
with pytest.raises(TypeError, match="use_consolidated"):
await zarr.api.asynchronous.open_consolidated(store, use_consolidated=False) # type: ignore[arg-type]
def test_open_consolidated_false_raises_sync(self) -> None:
store = zarr.storage.MemoryStore()
with pytest.raises(TypeError, match="use_consolidated"):
zarr.open_consolidated(store, use_consolidated=False) # type: ignore[arg-type]
async def test_consolidated(self, memory_store_with_hierarchy: Store) -> None:
# TODO: Figure out desired keys in
# TODO: variety in the hierarchies
# More nesting
# arrays under arrays
# single array
# etc.
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await consolidate_metadata(memory_store_with_hierarchy)
group2 = await AsyncGroup.open(memory_store_with_hierarchy)
array_metadata: dict[str, JSON] = {
"attributes": {},
"chunk_key_encoding": {
"configuration": {"separator": "/"},
"name": "default",
},
"codecs": (
{"configuration": {"endian": "little"}, "name": "bytes"},
{"configuration": {"level": 0, "checksum": False}, "name": "zstd"},
),
"data_type": "uint8",
"fill_value": 0,
"node_type": "array",
# "shape": (1, 2, 3),
"zarr_format": 3,
}
expected = GroupMetadata(
attributes={"foo": "bar"},
consolidated_metadata=ConsolidatedMetadata(
kind="inline",
must_understand=False,
metadata={
"air": ArrayV3Metadata.from_dict(
{
"shape": (1, 2, 3),
"chunk_grid": {
"configuration": {"chunk_shape": (1, 2, 3)},
"name": "regular",
},
**array_metadata,
}
),
"lat": ArrayV3Metadata.from_dict(
{
"shape": (1,),
"chunk_grid": {
"configuration": {"chunk_shape": (1,)},
"name": "regular",
},
**array_metadata,
}
),
"lon": ArrayV3Metadata.from_dict(
{
"shape": (2,),
"chunk_grid": {
"configuration": {"chunk_shape": (2,)},
"name": "regular",
},
**array_metadata,
}
),
"time": ArrayV3Metadata.from_dict(
{
"shape": (3,),
"chunk_grid": {
"configuration": {"chunk_shape": (3,)},
"name": "regular",
},
**array_metadata,
}
),
"child": GroupMetadata(
attributes={"key": "child"},
consolidated_metadata=ConsolidatedMetadata(
metadata={
"array": ArrayV3Metadata.from_dict(
{
**array_metadata,
"attributes": {"key": "child"},
"shape": (4, 4),
"chunk_grid": {
"configuration": {"chunk_shape": (4, 4)},
"name": "regular",
},
}
),
"grandchild": GroupMetadata(
attributes={"key": "grandchild"},
consolidated_metadata=ConsolidatedMetadata(
metadata={
# known to be empty child group
"empty_group": GroupMetadata(
consolidated_metadata=ConsolidatedMetadata(
metadata={}
),
attributes={"key": "empty"},
),
"array": ArrayV3Metadata.from_dict(
{
**array_metadata,
"attributes": {"key": "grandchild"},
"shape": (4, 4),
"chunk_grid": {
"configuration": {"chunk_shape": (4, 4)},
"name": "regular",
},
}
),
}
),
),
},
),
),
},
),
)
assert group2.metadata == expected
group3 = await open(store=memory_store_with_hierarchy)
assert group3.metadata == expected
group4 = await open_consolidated(store=memory_store_with_hierarchy)
assert group4.metadata == expected
buf = await memory_store_with_hierarchy.get(
"zarr.json", prototype=default_buffer_prototype()
)
assert buf is not None
result_raw = json.loads(buf.to_bytes())["consolidated_metadata"]
assert result_raw["kind"] == "inline"
assert sorted(result_raw["metadata"]) == [
"air",
"child",
"child/array",
"child/grandchild",
"child/grandchild/array",
"child/grandchild/empty_group",
"lat",
"lon",
"time",
]
def test_consolidated_sync(self, memory_store: Store) -> None:
g = zarr.api.synchronous.group(store=memory_store, attributes={"foo": "bar"})
dtype = "uint8"
g.create_array(name="air", shape=(1, 2, 3), dtype=dtype)
g.create_array(name="lat", shape=(1,), dtype=dtype)
g.create_array(name="lon", shape=(2,), dtype=dtype)
g.create_array(name="time", shape=(3,), dtype=dtype)
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
zarr.api.synchronous.consolidate_metadata(memory_store)
group2 = zarr.Group.open(memory_store)
array_metadata: dict[str, JSON] = {
"attributes": {},
"chunk_key_encoding": {
"configuration": {"separator": "/"},
"name": "default",
},
"codecs": (
{"configuration": {"endian": "little"}, "name": "bytes"},
{"configuration": {"level": 0, "checksum": False}, "name": "zstd"},
),
"data_type": dtype,
"fill_value": 0,
"node_type": "array",
# "shape": (1, 2, 3),
"zarr_format": 3,
}
expected = GroupMetadata(
attributes={"foo": "bar"},
consolidated_metadata=ConsolidatedMetadata(
kind="inline",
must_understand=False,
metadata={
"air": ArrayV3Metadata.from_dict(
{
"shape": (1, 2, 3),
"chunk_grid": {
"configuration": {"chunk_shape": (1, 2, 3)},
"name": "regular",
},
**array_metadata,
}
),
"lat": ArrayV3Metadata.from_dict(
{
"shape": (1,),
"chunk_grid": {
"configuration": {"chunk_shape": (1,)},
"name": "regular",
},
**array_metadata,
}
),
"lon": ArrayV3Metadata.from_dict(
{
"shape": (2,),
"chunk_grid": {
"configuration": {"chunk_shape": (2,)},
"name": "regular",
},
**array_metadata,
}
),
"time": ArrayV3Metadata.from_dict(
{
"shape": (3,),
"chunk_grid": {
"configuration": {"chunk_shape": (3,)},
"name": "regular",
},
**array_metadata,
}
),
},
),
)
assert group2.metadata == expected
group3 = zarr.api.synchronous.open(store=memory_store)
assert group3.metadata == expected
group4 = zarr.api.synchronous.open_consolidated(store=memory_store)
assert group4.metadata == expected
async def test_not_writable_raises(self, memory_store: zarr.storage.MemoryStore) -> None:
await group(store=memory_store, attributes={"foo": "bar"})
read_store = zarr.storage.MemoryStore(store_dict=memory_store._store_dict, read_only=True)
with pytest.raises(ValueError, match="does not support writing"):
await consolidate_metadata(read_store)
async def test_non_root_node(self, memory_store_with_hierarchy: Store) -> None:
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await consolidate_metadata(memory_store_with_hierarchy, path="child")
root = await AsyncGroup.open(memory_store_with_hierarchy)
child = await AsyncGroup.open(StorePath(memory_store_with_hierarchy) / "child")
assert root.metadata.consolidated_metadata is None
assert child.metadata.consolidated_metadata is not None
assert "air" not in child.metadata.consolidated_metadata.metadata
assert "grandchild" in child.metadata.consolidated_metadata.metadata
def test_consolidated_metadata_from_dict(self) -> None:
data: dict[str, JSON] = {"must_understand": False}
# missing kind
with pytest.raises(ValueError, match="kind='None'"):
ConsolidatedMetadata.from_dict(data)
# invalid kind
data["kind"] = "invalid"
with pytest.raises(ValueError, match="kind='invalid'"):
ConsolidatedMetadata.from_dict(data)
# missing metadata
data["kind"] = "inline"
with pytest.raises(TypeError, match="Unexpected type for 'metadata'"):
ConsolidatedMetadata.from_dict(data)
data["kind"] = "inline"
# empty is fine
data["metadata"] = {}
ConsolidatedMetadata.from_dict(data)
def test_flatten(self) -> None:
array_metadata: dict[str, Any] = {
"attributes": {},
"chunk_key_encoding": {
"configuration": {"separator": "/"},
"name": "default",
},
"codecs": ({"configuration": {"endian": "little"}, "name": "bytes"},),
"data_type": "float64",
"fill_value": np.float64(0.0),
"node_type": "array",
# "shape": (1, 2, 3),
"zarr_format": 3,
}
metadata = ConsolidatedMetadata(
kind="inline",
must_understand=False,
metadata={
"air": ArrayV3Metadata.from_dict(
{
"shape": (1, 2, 3),
"chunk_grid": {
"configuration": {"chunk_shape": (1, 2, 3)},
"name": "regular",
},
**array_metadata,
}
),
"lat": ArrayV3Metadata.from_dict(
{
"shape": (1,),
"chunk_grid": {
"configuration": {"chunk_shape": (1,)},
"name": "regular",
},
**array_metadata,
}
),
"child": GroupMetadata(
attributes={"key": "child"},
consolidated_metadata=ConsolidatedMetadata(
metadata={
"array": ArrayV3Metadata.from_dict(
{
**array_metadata,
"attributes": {"key": "child"},
"shape": (4, 4),
"chunk_grid": {
"configuration": {"chunk_shape": (4, 4)},
"name": "regular",
},
}
),
"grandchild": GroupMetadata(
attributes={"key": "grandchild"},
consolidated_metadata=ConsolidatedMetadata(
metadata={
"array": ArrayV3Metadata.from_dict(
{
**array_metadata,
"attributes": {"key": "grandchild"},
"shape": (4, 4),
"chunk_grid": {
"configuration": {"chunk_shape": (4, 4)},
"name": "regular",
},
}
)
}
),
),
},
),
),
},
)
result = metadata.flattened_metadata
expected = {
"air": metadata.metadata["air"],
"lat": metadata.metadata["lat"],
"child": GroupMetadata(
attributes={"key": "child"}, consolidated_metadata=ConsolidatedMetadata(metadata={})
),
"child/array": metadata.metadata["child"].consolidated_metadata.metadata["array"], # type: ignore[union-attr]
"child/grandchild": GroupMetadata(
attributes={"key": "grandchild"},
consolidated_metadata=ConsolidatedMetadata(metadata={}),
),
"child/grandchild/array": (
metadata.metadata["child"]
.consolidated_metadata.metadata["grandchild"] # type: ignore[union-attr]
.consolidated_metadata.metadata["array"]
),
}
assert result == expected
def test_invalid_metadata_raises(self) -> None:
payload: dict[str, JSON] = {
"kind": "inline",
"must_understand": False,
"metadata": {
"foo": [1, 2, 3] # invalid
},
}
with pytest.raises(TypeError, match="key='foo', type='list'"):
ConsolidatedMetadata.from_dict(payload)
def test_to_dict_empty(self) -> None:
meta = ConsolidatedMetadata(
metadata={
"empty": GroupMetadata(
attributes={"key": "empty"},
consolidated_metadata=ConsolidatedMetadata(metadata={}),
)
}
)
result = meta.to_dict()
expected = {
"kind": "inline",
"must_understand": False,
"metadata": {
"empty": {
"attributes": {"key": "empty"},
"consolidated_metadata": {
"kind": "inline",
"must_understand": False,
"metadata": {},
},
"node_type": "group",
"zarr_format": 3,
}
},
}
assert result == expected
@pytest.mark.parametrize("zarr_format", [2, 3])
async def test_to_dict_order(
self, memory_store: zarr.storage.MemoryStore, zarr_format: ZarrFormat
) -> None:
with zarr.config.set(default_zarr_format=zarr_format):
g = await group(store=memory_store)
# Create groups in non-lexicographix order
dtype = "float32"
await g.create_array(name="b", shape=(1,), dtype=dtype)
child = await g.create_group("c", attributes={"key": "child"})
await g.create_array(name="a", shape=(1,), dtype=dtype)
await child.create_array("e", shape=(1,), dtype=dtype)
await child.create_array("d", shape=(1,), dtype=dtype)
# Consolidate metadata and re-open store
if zarr_format == 3:
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
else:
await zarr.api.asynchronous.consolidate_metadata(memory_store)
g2 = await zarr.api.asynchronous.open_group(store=memory_store)
assert g2.metadata.consolidated_metadata is not None
assert list(g2.metadata.consolidated_metadata.metadata) == ["a", "b", "c"]
assert list(g2.metadata.consolidated_metadata.flattened_metadata) == [
"a",
"b",
"c",
"c/d",
"c/e",
]
@pytest.mark.parametrize("zarr_format", [2, 3])
async def test_open_consolidated_raises_async(self, zarr_format: ZarrFormat) -> None:
store = zarr.storage.MemoryStore()
await AsyncGroup.from_store(store, zarr_format=zarr_format)
with pytest.raises(ValueError):
await zarr.api.asynchronous.open_consolidated(store, zarr_format=zarr_format)
with pytest.raises(ValueError):
await zarr.api.asynchronous.open_consolidated(store, zarr_format=None)
@pytest.fixture
async def v2_consolidated_metadata_empty_dataset(
self, memory_store: zarr.storage.MemoryStore
) -> AsyncGroup:
zgroup_bytes = cpu.Buffer.from_bytes(json.dumps({"zarr_format": 2}).encode())
zmetadata_bytes = cpu.Buffer.from_bytes(
b'{"metadata":{".zgroup":{"zarr_format":2}},"zarr_consolidated_format":1}'
)
return AsyncGroup._from_bytes_v2(
StorePath(memory_store, path=""),
zgroup_bytes,
zattrs_bytes=None,
consolidated_metadata_bytes=zmetadata_bytes,
)
async def test_consolidated_metadata_backwards_compatibility(
self, v2_consolidated_metadata_empty_dataset: AsyncGroup
) -> None:
"""
Test that consolidated metadata handles a missing .zattrs key. This is necessary for backwards compatibility with zarr-python 2.x. See https://github.com/zarr-developers/zarr-python/issues/2694
"""
store = zarr.storage.MemoryStore()
await zarr.api.asynchronous.open(store=store, zarr_format=2)
await zarr.api.asynchronous.consolidate_metadata(store)
result = await zarr.api.asynchronous.open_consolidated(store, zarr_format=2)
assert result.metadata == v2_consolidated_metadata_empty_dataset.metadata
async def test_consolidated_metadata_v2(self) -> None:
store = zarr.storage.MemoryStore()
g = await AsyncGroup.from_store(store, attributes={"key": "root"}, zarr_format=2)
dtype = parse_dtype("uint8", zarr_format=2)
await g.create_array(name="a", shape=(1,), attributes={"key": "a"}, dtype=dtype)
g1 = await g.create_group(name="g1", attributes={"key": "g1"})
await g1.create_group(name="g2", attributes={"key": "g2"})
await zarr.api.asynchronous.consolidate_metadata(store)
result = await zarr.api.asynchronous.open_consolidated(store, zarr_format=2)
expected = GroupMetadata(
attributes={"key": "root"},
zarr_format=2,
consolidated_metadata=ConsolidatedMetadata(
metadata={
"a": ArrayV2Metadata(
shape=(1,),
dtype=dtype,
attributes={"key": "a"},
chunks=(1,),
fill_value=0,
compressor=Blosc(),
order="C",
),
"g1": GroupMetadata(
attributes={"key": "g1"},
zarr_format=2,
consolidated_metadata=ConsolidatedMetadata(
metadata={
"g2": GroupMetadata(
attributes={"key": "g2"},
zarr_format=2,
consolidated_metadata=ConsolidatedMetadata(metadata={}),
)
}
),
),
}
),
)
assert result.metadata == expected
@pytest.mark.parametrize("zarr_format", [2, 3])
async def test_use_consolidated_false(
self, memory_store: zarr.storage.MemoryStore, zarr_format: ZarrFormat
) -> None:
with zarr.config.set(default_zarr_format=zarr_format):
g = await group(store=memory_store, attributes={"foo": "bar"})
await g.create_group(name="a")
# test a stale read
if zarr_format == 3:
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
else:
await zarr.api.asynchronous.consolidate_metadata(memory_store)
await g.create_group(name="b")
stale = await zarr.api.asynchronous.open_group(store=memory_store)
assert len([x async for x in stale.members()]) == 1
assert stale.metadata.consolidated_metadata
assert list(stale.metadata.consolidated_metadata.metadata) == ["a"]
# bypass stale data
good = await zarr.api.asynchronous.open_group(
store=memory_store, use_consolidated=False
)
assert len([x async for x in good.members()]) == 2
# reconsolidate
if zarr_format == 3:
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
else:
await zarr.api.asynchronous.consolidate_metadata(memory_store)
good = await zarr.api.asynchronous.open_group(store=memory_store)
assert len([x async for x in good.members()]) == 2
assert good.metadata.consolidated_metadata
assert sorted(good.metadata.consolidated_metadata.metadata) == ["a", "b"]
async def test_stale_child_metadata_ignored(
self, memory_store: zarr.storage.MemoryStore
) -> None:
# https://github.com/zarr-developers/zarr-python/issues/2921
# When consolidating metadata, we should ignore any (possibly stale) metadata
# from previous consolidations, *including at child nodes*.
root = await zarr.api.asynchronous.group(store=memory_store, zarr_format=3)
await root.create_group("foo")
await zarr.api.asynchronous.consolidate_metadata(memory_store, path="foo")
await root.create_group("foo/bar/spam")
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
reopened = await zarr.api.asynchronous.open_consolidated(store=memory_store, zarr_format=3)
result = [x[0] async for x in reopened.members(max_depth=None)]
expected = ["foo", "foo/bar", "foo/bar/spam"]
assert result == expected
async def test_use_consolidated_for_children_members(
self, memory_store: zarr.storage.MemoryStore
) -> None:
# A test that has *unconsolidated* metadata at the root group, but discovers
# a child group with consolidated metadata.
root = await zarr.api.asynchronous.create_group(store=memory_store)
await root.create_group("a/b")
# Consolidate metadata at "a/b"
await zarr.api.asynchronous.consolidate_metadata(memory_store, path="a/b")
# Add a new group a/b/c, that's not present in the CM at "a/b"
await root.create_group("a/b/c")
# Now according to the consolidated metadata, "a" has children ["b"]
# but according to the unconsolidated metadata, "a" has children ["b", "c"]
group = await zarr.api.asynchronous.open_group(store=memory_store, path="a")
with pytest.warns(ZarrUserWarning, match="Object at 'c' not found"):
result = sorted([x[0] async for x in group.members(max_depth=None)])
expected = ["b"]
assert result == expected
result = sorted(
[x[0] async for x in group.members(max_depth=None, use_consolidated_for_children=False)]
)
expected = ["b", "b/c"]
assert result == expected
async def test_absolute_path_for_subgroup(self, memory_store: zarr.storage.MemoryStore) -> None:
root = await zarr.api.asynchronous.create_group(store=memory_store)
await root.create_group("a/b")
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
group = await zarr.api.asynchronous.open_group(store=memory_store)
subgroup = await group.getitem("/a")
assert isinstance(subgroup, AsyncGroup)
members = [x async for x in subgroup.keys()] # noqa: SIM118
assert members == ["b"]
@pytest.mark.parametrize("fill_value", [np.nan, np.inf, -np.inf])
async def test_consolidated_metadata_encodes_special_chars(
memory_store: Store, zarr_format: ZarrFormat, fill_value: float
) -> None:
root = await group(store=memory_store, zarr_format=zarr_format)
_time = await root.create_array("time", shape=(12,), dtype=np.float64, fill_value=fill_value)
if zarr_format == 3:
with pytest.warns(
ZarrUserWarning,
match="Consolidated metadata is currently not part in the Zarr format 3 specification.",
):
await zarr.api.asynchronous.consolidate_metadata(memory_store)
else:
await zarr.api.asynchronous.consolidate_metadata(memory_store)
root = await group(store=memory_store, zarr_format=zarr_format)
root_buffer = root.metadata.to_buffer_dict(default_buffer_prototype())
if zarr_format == 2:
root_metadata = json.loads(root_buffer[".zmetadata"].to_bytes().decode("utf-8"))["metadata"]
elif zarr_format == 3:
root_metadata = json.loads(root_buffer["zarr.json"].to_bytes().decode("utf-8"))[
"consolidated_metadata"
]["metadata"]
expected_fill_value = _time._zdtype.to_json_scalar(fill_value, zarr_format=2)
if zarr_format == 2:
assert root_metadata["time/.zarray"]["fill_value"] == expected_fill_value
elif zarr_format == 3:
assert root_metadata["time"]["fill_value"] == expected_fill_value
| TestConsolidated |
python | run-llama__llama_index | llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/llama_index/storage/docstore/gel/base.py | {
"start": 235,
"end": 835
} | class ____(KVDocumentStore):
"""
Gel Document (Node) store.
A Gel store for Document and Node objects.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a GelDocumentStore."""
super().__init__(gel_kvstore, namespace=namespace, batch_size=batch_size)
| GelDocumentStore |
python | huggingface__transformers | tests/models/superpoint/test_image_processing_superpoint.py | {
"start": 1230,
"end": 3737
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_grayscale=True,
):
size = size if size is not None else {"height": 480, "width": 640}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_grayscale = do_grayscale
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_grayscale": self.do_grayscale,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
def prepare_keypoint_detection_output(self, pixel_values):
max_number_keypoints = 50
batch_size = len(pixel_values)
mask = torch.zeros((batch_size, max_number_keypoints))
keypoints = torch.zeros((batch_size, max_number_keypoints, 2))
scores = torch.zeros((batch_size, max_number_keypoints))
descriptors = torch.zeros((batch_size, max_number_keypoints, 16))
for i in range(batch_size):
random_number_keypoints = np.random.randint(0, max_number_keypoints)
mask[i, :random_number_keypoints] = 1
keypoints[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 2))
scores[i, :random_number_keypoints] = torch.rand((random_number_keypoints,))
descriptors[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 16))
return SuperPointKeypointDescriptionOutput(
loss=None, keypoints=keypoints, scores=scores, descriptors=descriptors, mask=mask, hidden_states=None
)
@require_torch
@require_vision
| SuperPointImageProcessingTester |
python | getsentry__sentry | fixtures/page_objects/base.py | {
"start": 494,
"end": 843
} | class ____(BaseElement):
label_attr = "aria-label"
disabled_attr = "aria-disabled"
@property
def disabled(self):
return self.element.get_attribute(self.disabled_attr)
@property
def label(self):
return self.element.get_attribute(self.label_attr)
def click(self):
self.element.click()
| ButtonElement |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 73885,
"end": 74007
} | class ____:
xlLinkTypeExcelLinks = 1 # from enum XlLinkType
xlLinkTypeOLELinks = 2 # from enum XlLinkType
| LinkType |
python | pytorch__pytorch | torch/nn/modules/dropout.py | {
"start": 3847,
"end": 5974
} | class ____(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv2d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
.. warning ::
Due to historical reasons, this class will perform 1D channel-wise dropout
for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
support inputs without a batch dimension of shape :math:`(C, H, W)`. This
behavior will change in a future release to interpret 3D inputs as no-batch-dim
inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
- Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout2d(p=0.2)
>>> input = torch.randn(20, 16, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.dropout2d(input, self.p, self.training, self.inplace)
| Dropout2d |
python | doocs__leetcode | solution/3100-3199/3134.Find the Median of the Uniqueness Array/Solution.py | {
"start": 0,
"end": 672
} | class ____:
def medianOfUniquenessArray(self, nums: List[int]) -> int:
def check(mx: int) -> bool:
cnt = defaultdict(int)
k = l = 0
for r, x in enumerate(nums):
cnt[x] += 1
while len(cnt) > mx:
y = nums[l]
cnt[y] -= 1
if cnt[y] == 0:
cnt.pop(y)
l += 1
k += r - l + 1
if k >= (m + 1) // 2:
return True
return False
n = len(nums)
m = (1 + n) * n // 2
return bisect_left(range(n), True, key=check)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/best-time-to-buy-and-sell-stock-using-strategy.py | {
"start": 60,
"end": 694
} | class ____(object):
def maxProfit(self, prices, strategy, k):
"""
:type prices: List[int]
:type strategy: List[int]
:type k: int
:rtype: int
"""
result = curr = 0
for i in xrange(len(prices)):
curr += prices[i]*(0 if i < k//2 else 1) if i < k else prices[i]*strategy[i]
result += prices[i]*strategy[i]
result = max(result, curr)
for i in xrange(k, len(prices)):
curr += (prices[i-k]*strategy[i-k])+(prices[i]-prices[i-k//2])-(prices[i]*strategy[i])
result = max(result, curr)
return result
| Solution |
python | python-pillow__Pillow | Tests/test_imageops.py | {
"start": 268,
"end": 19128
} | class ____(ImageOps.SupportsGetMesh):
def getmesh(
self, im: Image.Image
) -> list[
tuple[tuple[int, int, int, int], tuple[int, int, int, int, int, int, int, int]]
]:
x, y = im.size
return [((0, 0, x, y), (0, 0, x, 0, x, y, y, 0))]
deformer = Deformer()
def test_sanity() -> None:
ImageOps.autocontrast(hopper("L"))
ImageOps.autocontrast(hopper("RGB"))
ImageOps.autocontrast(hopper("L"), cutoff=10)
ImageOps.autocontrast(hopper("L"), cutoff=(2, 10))
ImageOps.autocontrast(hopper("L"), ignore=[0, 255])
ImageOps.autocontrast(hopper("L"), mask=hopper("L"))
ImageOps.autocontrast(hopper("L"), preserve_tone=True)
ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
ImageOps.colorize(hopper("L"), "black", "white")
ImageOps.pad(hopper("L"), (128, 128))
ImageOps.pad(hopper("RGB"), (128, 128))
ImageOps.contain(hopper("L"), (128, 128))
ImageOps.contain(hopper("RGB"), (128, 128))
ImageOps.cover(hopper("L"), (128, 128))
ImageOps.cover(hopper("RGB"), (128, 128))
ImageOps.crop(hopper("L"), 1)
ImageOps.crop(hopper("RGB"), 1)
ImageOps.deform(hopper("L"), deformer)
ImageOps.deform(hopper("RGB"), deformer)
ImageOps.equalize(hopper("L"))
ImageOps.equalize(hopper("RGB"))
ImageOps.expand(hopper("L"), 1)
ImageOps.expand(hopper("RGB"), 1)
ImageOps.expand(hopper("L"), 2, "blue")
ImageOps.expand(hopper("RGB"), 2, "blue")
ImageOps.fit(hopper("L"), (128, 128))
ImageOps.fit(hopper("RGB"), (128, 128))
ImageOps.flip(hopper("L"))
ImageOps.flip(hopper("RGB"))
ImageOps.grayscale(hopper("L"))
ImageOps.grayscale(hopper("RGB"))
ImageOps.invert(hopper("1"))
ImageOps.invert(hopper("L"))
ImageOps.invert(hopper("RGB"))
ImageOps.mirror(hopper("L"))
ImageOps.mirror(hopper("RGB"))
ImageOps.posterize(hopper("L"), 4)
ImageOps.posterize(hopper("RGB"), 4)
ImageOps.solarize(hopper("L"))
ImageOps.solarize(hopper("RGB"))
ImageOps.exif_transpose(hopper("L"))
ImageOps.exif_transpose(hopper("RGB"))
def test_1pxfit() -> None:
# Division by zero in equalize if image is 1 pixel high
newimg = ImageOps.fit(hopper("RGB").resize((1, 1)), (35, 35))
assert newimg.size == (35, 35)
newimg = ImageOps.fit(hopper("RGB").resize((1, 100)), (35, 35))
assert newimg.size == (35, 35)
newimg = ImageOps.fit(hopper("RGB").resize((100, 1)), (35, 35))
assert newimg.size == (35, 35)
def test_fit_same_ratio() -> None:
# The ratio for this image is 1000.0 / 755 = 1.3245033112582782
# If the ratios are not acknowledged to be the same,
# and Pillow attempts to adjust the width to
# 1.3245033112582782 * 755 = 1000.0000000000001
# then centering this greater width causes a negative x offset when cropping
with Image.new("RGB", (1000, 755)) as im:
new_im = ImageOps.fit(im, (1000, 755))
assert new_im.size == (1000, 755)
@pytest.mark.parametrize("new_size", ((256, 256), (512, 256), (256, 512)))
def test_contain(new_size: tuple[int, int]) -> None:
im = hopper()
new_im = ImageOps.contain(im, new_size)
assert new_im.size == (256, 256)
def test_contain_round() -> None:
im = Image.new("1", (43, 63), 1)
new_im = ImageOps.contain(im, (5, 7))
assert new_im.width == 5
im = Image.new("1", (63, 43), 1)
new_im = ImageOps.contain(im, (7, 5))
assert new_im.height == 5
@pytest.mark.parametrize(
"image_name, expected_size",
(
("colr_bungee.png", (1024, 256)), # landscape
("imagedraw_stroke_multiline.png", (256, 640)), # portrait
("hopper.png", (256, 256)), # square
),
)
def test_cover(image_name: str, expected_size: tuple[int, int]) -> None:
with Image.open("Tests/images/" + image_name) as im:
new_im = ImageOps.cover(im, (256, 256))
assert new_im.size == expected_size
def test_pad() -> None:
# Same ratio
im = hopper()
new_size = (im.width * 2, im.height * 2)
new_im = ImageOps.pad(im, new_size)
assert new_im.size == new_size
for label, color, new_size in [
("h", None, (im.width * 4, im.height * 2)),
("v", "#f00", (im.width * 2, im.height * 4)),
]:
for i, centering in enumerate([(0, 0), (0.5, 0.5), (1, 1)]):
new_im = ImageOps.pad(im, new_size, color=color, centering=centering)
assert new_im.size == new_size
assert_image_similar_tofile(
new_im, "Tests/images/imageops_pad_" + label + "_" + str(i) + ".jpg", 6
)
def test_pad_round() -> None:
im = Image.new("1", (1, 1), 1)
new_im = ImageOps.pad(im, (4, 1))
assert new_im.getpixel((2, 0)) == 1
new_im = ImageOps.pad(im, (1, 4))
assert new_im.getpixel((0, 2)) == 1
@pytest.mark.parametrize("mode", ("P", "PA"))
def test_palette(mode: str) -> None:
im = hopper(mode)
# Expand
expanded_im = ImageOps.expand(im)
assert_image_equal(im.convert("RGB"), expanded_im.convert("RGB"))
# Pad
padded_im = ImageOps.pad(im, (256, 128), centering=(0, 0))
assert_image_equal(
im.convert("RGB"), padded_im.convert("RGB").crop((0, 0, 128, 128))
)
def test_rgba_palette() -> None:
im = Image.new("P", (1, 1))
red = (255, 0, 0, 255)
translucent_black = (0, 0, 0, 127)
im.putpalette(red + translucent_black, "RGBA")
expanded_im = ImageOps.expand(im, 1, 1)
palette = expanded_im.palette
assert palette is not None
assert palette.mode == "RGBA"
assert expanded_im.convert("RGBA").getpixel((0, 0)) == translucent_black
def test_pil163() -> None:
# Division by zero in equalize if < 255 pixels in image (@PIL163)
i = hopper("RGB").resize((15, 16))
ImageOps.equalize(i.convert("L"))
ImageOps.equalize(i.convert("P"))
ImageOps.equalize(i.convert("RGB"))
def test_scale() -> None:
# Test the scaling function
i = hopper("L").resize((50, 50))
with pytest.raises(ValueError):
ImageOps.scale(i, -1)
newimg = ImageOps.scale(i, 1)
assert newimg.size == (50, 50)
newimg = ImageOps.scale(i, 2)
assert newimg.size == (100, 100)
newimg = ImageOps.scale(i, 0.5)
assert newimg.size == (25, 25)
@pytest.mark.parametrize("border", (10, (1, 2, 3, 4)))
def test_expand_palette(border: int | tuple[int, int, int, int]) -> None:
with Image.open("Tests/images/p_16.tga") as im:
im_expanded = ImageOps.expand(im, border, (255, 0, 0))
if isinstance(border, int):
left = top = right = bottom = border
else:
left, top, right, bottom = border
px = im_expanded.convert("RGB").load()
assert px is not None
for x in range(im_expanded.width):
for b in range(top):
assert px[x, b] == (255, 0, 0)
for b in range(bottom):
assert px[x, im_expanded.height - 1 - b] == (255, 0, 0)
for y in range(im_expanded.height):
for b in range(left):
assert px[b, y] == (255, 0, 0)
for b in range(right):
assert px[im_expanded.width - 1 - b, y] == (255, 0, 0)
im_cropped = im_expanded.crop(
(left, top, im_expanded.width - right, im_expanded.height - bottom)
)
assert_image_equal(im_cropped, im)
def test_colorize_2color() -> None:
# Test the colorizing function with 2-color functionality
# Open test image (256px by 10px, black to white)
with Image.open("Tests/images/bw_gradient.png") as im:
im = im.convert("L")
# Create image with original 2-color functionality
im_test = ImageOps.colorize(im, "red", "green")
# Test output image (2-color)
left = (0, 1)
middle = (127, 1)
right = (255, 1)
value = im_test.getpixel(left)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(255, 0, 0),
threshold=1,
msg="black test pixel incorrect",
)
value = im_test.getpixel(middle)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(127, 63, 0),
threshold=1,
msg="mid test pixel incorrect",
)
value = im_test.getpixel(right)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(0, 127, 0),
threshold=1,
msg="white test pixel incorrect",
)
def test_colorize_2color_offset() -> None:
# Test the colorizing function with 2-color functionality and offset
# Open test image (256px by 10px, black to white)
with Image.open("Tests/images/bw_gradient.png") as im:
im = im.convert("L")
# Create image with original 2-color functionality with offsets
im_test = ImageOps.colorize(
im, black="red", white="green", blackpoint=50, whitepoint=100
)
# Test output image (2-color) with offsets
left = (25, 1)
middle = (75, 1)
right = (125, 1)
value = im_test.getpixel(left)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(255, 0, 0),
threshold=1,
msg="black test pixel incorrect",
)
value = im_test.getpixel(middle)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(127, 63, 0),
threshold=1,
msg="mid test pixel incorrect",
)
value = im_test.getpixel(right)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(0, 127, 0),
threshold=1,
msg="white test pixel incorrect",
)
def test_colorize_3color_offset() -> None:
# Test the colorizing function with 3-color functionality and offset
# Open test image (256px by 10px, black to white)
with Image.open("Tests/images/bw_gradient.png") as im:
im = im.convert("L")
# Create image with new three color functionality with offsets
im_test = ImageOps.colorize(
im,
black="red",
white="green",
mid="blue",
blackpoint=50,
whitepoint=200,
midpoint=100,
)
# Test output image (3-color) with offsets
left = (25, 1)
left_middle = (75, 1)
middle = (100, 1)
right_middle = (150, 1)
right = (225, 1)
value = im_test.getpixel(left)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(255, 0, 0),
threshold=1,
msg="black test pixel incorrect",
)
value = im_test.getpixel(left_middle)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(127, 0, 127),
threshold=1,
msg="low-mid test pixel incorrect",
)
value = im_test.getpixel(middle)
assert isinstance(value, tuple)
assert_tuple_approx_equal(value, (0, 0, 255), threshold=1, msg="mid incorrect")
value = im_test.getpixel(right_middle)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(0, 63, 127),
threshold=1,
msg="high-mid test pixel incorrect",
)
value = im_test.getpixel(right)
assert isinstance(value, tuple)
assert_tuple_approx_equal(
value,
(0, 127, 0),
threshold=1,
msg="white test pixel incorrect",
)
def test_exif_transpose() -> None:
exts = [".jpg"]
if features.check("webp"):
exts.append(".webp")
for ext in exts:
with Image.open("Tests/images/hopper" + ext) as base_im:
def check(orientation_im: Image.Image) -> None:
for im in [
orientation_im,
orientation_im.copy(),
]: # ImageFile # Image
if orientation_im is base_im:
assert "exif" not in im.info
else:
original_exif = im.info["exif"]
transposed_im = ImageOps.exif_transpose(im)
assert_image_similar(base_im, transposed_im, 17)
if orientation_im is base_im:
assert "exif" not in im.info
else:
assert transposed_im.info["exif"] != original_exif
assert 0x0112 in im.getexif()
assert 0x0112 not in transposed_im.getexif()
# Repeat the operation to test that it does not keep transposing
transposed_im2 = ImageOps.exif_transpose(transposed_im)
assert_image_equal(transposed_im2, transposed_im)
check(base_im)
for i in range(2, 9):
with Image.open(
"Tests/images/hopper_orientation_" + str(i) + ext
) as orientation_im:
check(orientation_im)
# Orientation from "XML:com.adobe.xmp" info key
for suffix in ("", "_exiftool"):
with Image.open("Tests/images/xmp_tags_orientation" + suffix + ".png") as im:
assert im.getexif()[0x0112] == 3
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
transposed_im._reload_exif()
assert 0x0112 not in transposed_im.getexif()
# Orientation from "Raw profile type exif" info key
# This test image has been manually hexedited from exif_imagemagick.png
# to have a different orientation
with Image.open("Tests/images/exif_imagemagick_orientation.png") as im:
assert im.getexif()[0x0112] == 3
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
# Orientation set directly on Image.Exif
im = hopper()
im.getexif()[0x0112] = 3
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
def test_exif_transpose_with_xmp_tuple() -> None:
with Image.open("Tests/images/xmp_tags_orientation.png") as im:
assert im.getexif()[0x0112] == 3
im.info["xmp"] = (b"test",)
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
def test_exif_transpose_xml_without_xmp() -> None:
with Image.open("Tests/images/xmp_tags_orientation.png") as im:
assert im.getexif()[0x0112] == 3
assert "XML:com.adobe.xmp" in im.info
del im.info["xmp"]
transposed_im = ImageOps.exif_transpose(im)
assert 0x0112 not in transposed_im.getexif()
def test_exif_transpose_in_place() -> None:
with Image.open("Tests/images/orientation_rectangle.jpg") as im:
assert im.size == (2, 1)
assert im.getexif()[0x0112] == 8
expected = im.rotate(90, expand=True)
ImageOps.exif_transpose(im, in_place=True)
assert im.size == (1, 2)
assert 0x0112 not in im.getexif()
assert_image_equal(im, expected)
def test_autocontrast_unsupported_mode() -> None:
im = Image.new("RGBA", (1, 1))
with pytest.raises(OSError):
ImageOps.autocontrast(im)
def test_autocontrast_cutoff() -> None:
# Test the cutoff argument of autocontrast
with Image.open("Tests/images/bw_gradient.png") as img:
def autocontrast(cutoff: int | tuple[int, int]) -> list[int]:
return ImageOps.autocontrast(img, cutoff).histogram()
assert autocontrast(10) == autocontrast((10, 10))
assert autocontrast(10) != autocontrast((1, 10))
def test_autocontrast_mask_toy_input() -> None:
# Test the mask argument of autocontrast
with Image.open("Tests/images/bw_gradient.png") as img:
rect_mask = Image.new("L", img.size, 0)
draw = ImageDraw.Draw(rect_mask)
x0 = img.size[0] // 4
y0 = img.size[1] // 4
x1 = 3 * img.size[0] // 4
y1 = 3 * img.size[1] // 4
draw.rectangle((x0, y0, x1, y1), fill=255)
result = ImageOps.autocontrast(img, mask=rect_mask)
result_nomask = ImageOps.autocontrast(img)
assert result != result_nomask
assert ImageStat.Stat(result, mask=rect_mask).median == [127]
assert ImageStat.Stat(result_nomask).median == [128]
def test_autocontrast_mask_real_input() -> None:
# Test the autocontrast with a rectangular mask
with Image.open("Tests/images/iptc.jpg") as img:
rect_mask = Image.new("L", img.size, 0)
draw = ImageDraw.Draw(rect_mask)
x0, y0 = img.size[0] // 2, img.size[1] // 2
x1, y1 = img.size[0] - 40, img.size[1]
draw.rectangle((x0, y0, x1, y1), fill=255)
result = ImageOps.autocontrast(img, mask=rect_mask)
result_nomask = ImageOps.autocontrast(img)
assert result_nomask != result
assert_tuple_approx_equal(
ImageStat.Stat(result, mask=rect_mask).median,
(195, 202, 184),
threshold=2,
msg="autocontrast with mask pixel incorrect",
)
assert_tuple_approx_equal(
ImageStat.Stat(result_nomask).median,
(119, 106, 79),
threshold=2,
msg="autocontrast without mask pixel incorrect",
)
def test_autocontrast_preserve_tone() -> None:
def autocontrast(mode: str, preserve_tone: bool) -> list[int]:
im = hopper(mode)
return ImageOps.autocontrast(im, preserve_tone=preserve_tone).histogram()
assert autocontrast("RGB", True) != autocontrast("RGB", False)
assert autocontrast("L", True) == autocontrast("L", False)
def test_autocontrast_preserve_gradient() -> None:
gradient = Image.linear_gradient("L")
# test with a grayscale gradient that extends to 0,255.
# Should be a noop.
out = ImageOps.autocontrast(gradient, cutoff=0, preserve_tone=True)
assert_image_equal(gradient, out)
# cutoff the top and bottom
# autocontrast should make the first and last histogram entries equal
# and, with rounding, should be 10% of the image pixels
out = ImageOps.autocontrast(gradient, cutoff=10, preserve_tone=True)
hist = out.histogram()
assert hist[0] == hist[-1]
assert hist[-1] == 256 * round(256 * 0.10)
# in rgb
img = gradient.convert("RGB")
out = ImageOps.autocontrast(img, cutoff=0, preserve_tone=True)
assert_image_equal(img, out)
@pytest.mark.parametrize(
"color", ((255, 255, 255), (127, 255, 0), (127, 127, 127), (0, 0, 0))
)
def test_autocontrast_preserve_one_color(color: tuple[int, int, int]) -> None:
img = Image.new("RGB", (10, 10), color)
# single color images shouldn't change
out = ImageOps.autocontrast(img, cutoff=0, preserve_tone=True)
assert_image_equal(img, out) # single color, no cutoff
# even if there is a cutoff
out = ImageOps.autocontrast(
img, cutoff=10, preserve_tone=True
) # single color 10 cutoff
assert_image_equal(img, out)
| Deformer |
python | Textualize__textual | docs/examples/guide/actions/actions01.py | {
"start": 57,
"end": 374
} | class ____(App):
def action_set_background(self, color: str) -> None:
self.screen.styles.background = color
def on_key(self, event: events.Key) -> None:
if event.key == "r":
self.action_set_background("red")
if __name__ == "__main__":
app = ActionsApp()
app.run()
| ActionsApp |
python | walkccc__LeetCode | solutions/1493. Longest Subarray of 1's After Deleting One Element/1493-2.py | {
"start": 0,
"end": 264
} | class ____:
def longestSubarray(self, nums: list[int]) -> int:
l = 0
zeros = 0
for num in nums:
if num == 0:
zeros += 1
if zeros > 1:
if nums[l] == 0:
zeros -= 1
l += 1
return len(nums) - l - 1
| Solution |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_bulk_operations.py | {
"start": 22134,
"end": 22756
} | class ____(BulkTest):
class DeleteBookResource(resources.ModelResource):
def for_delete(self, row, instance):
return True
class Meta:
model = UUIDBook
use_bulk = True
batch_size = 5
def setUp(self):
super().setUp()
self.resource = self.DeleteBookResource()
self.init_update_test_data(model=UUIDBook)
def test_bulk_delete_batch_size_of_5(self):
self.assertEqual(10, UUIDBook.objects.count())
self.resource.import_data(self.dataset)
self.assertEqual(0, UUIDBook.objects.count())
| BulkUUIDBookDeleteTest |
python | doocs__leetcode | lcof2/剑指 Offer II 020. 回文子字符串的个数/Solution.py | {
"start": 0,
"end": 362
} | class ____:
def countSubstrings(self, s: str) -> int:
def f(i, j):
cnt = 0
while i >= 0 and j < n:
if s[i] != s[j]:
break
cnt += 1
i, j = i - 1, j + 1
return cnt
n = len(s)
return sum(f(i, i) + f(i, i + 1) for i in range(n))
| Solution |
python | pypa__warehouse | warehouse/manage/views/teams.py | {
"start": 7728,
"end": 23839
} | class ____:
def __init__(self, team, request):
self.team = team
self.request = request
self.organization_service = request.find_service(
IOrganizationService, context=None
)
self.user_service = request.find_service(IUserService, context=None)
self.user_choices = sorted(
user.username
for user in set(
organization_owners(self.request, self.team.organization)
+ organization_managers(self.request, self.team.organization)
+ organization_members(self.request, self.team.organization)
)
if user not in self.team.members
)
@property
def default_response(self):
return {
"team": self.team,
"roles": self.organization_service.get_team_roles(self.team.id),
"form": CreateTeamRoleForm(
self.request.POST,
user_choices=self.user_choices,
),
}
@view_config(request_method="GET", permission=Permissions.OrganizationTeamsRead)
def manage_team_roles(self):
return self.default_response
@view_config(request_method="POST")
def create_team_role(self):
# Get and validate form from default response.
default_response = self.default_response
form = default_response["form"]
if not form.validate():
return default_response
# Add user to team.
username = form.username.data
role_name = TeamRoleType.Member
user_id = self.user_service.find_userid(username)
role = self.organization_service.add_team_role(
team_id=self.team.id,
user_id=user_id,
role_name=role_name,
)
# Record events.
self.team.organization.record_event(
tag=EventTag.Organization.TeamRoleAdd,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"team_name": self.team.name,
"role_name": role_name.value,
"target_user_id": str(user_id),
},
)
self.team.record_event(
tag=EventTag.Team.TeamRoleAdd,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"role_name": role_name.value,
"target_user_id": str(user_id),
},
)
role.user.record_event(
tag=EventTag.Account.TeamRoleAdd,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"organization_name": self.team.organization.name,
"team_name": self.team.name,
"role_name": role_name.value,
},
)
# Send notification emails.
owner_and_manager_users = set(
organization_owners(self.request, self.team.organization)
+ organization_managers(self.request, self.team.organization)
)
owner_and_manager_users.discard(role.user)
send_team_member_added_email(
self.request,
owner_and_manager_users,
user=role.user,
submitter=self.request.user,
organization_name=self.team.organization.name,
team_name=self.team.name,
)
send_added_as_team_member_email(
self.request,
role.user,
submitter=self.request.user,
organization_name=self.team.organization.name,
team_name=self.team.name,
)
# Display notification message.
self.request.session.flash(
f"Added the team {self.team.name!r} to {self.team.organization.name!r}",
queue="success",
)
# Refresh teams list.
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST",
route_name="manage.team.delete_role",
permission=Permissions.OrganizationTeamsRead,
)
def delete_team_role(self):
# Get team role.
role_id = self.request.POST["role_id"]
role = self.organization_service.get_team_role(role_id)
if not role or role.team_id != self.team.id:
self.request.session.flash("Could not find member", queue="error")
elif (
not self.request.has_permission(Permissions.OrganizationTeamsManage)
and role.user != self.request.user
):
self.request.session.flash(
"Cannot remove other people from the team", queue="error"
)
else:
# Delete team role.
self.organization_service.delete_team_role(role.id)
# Record events.
self.team.organization.record_event(
tag=EventTag.Organization.TeamRoleRemove,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"team_name": self.team.name,
"role_name": role.role_name.value,
"target_user_id": str(role.user.id),
},
)
self.team.record_event(
tag=EventTag.Team.TeamRoleRemove,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"role_name": role.role_name.value,
"target_user_id": str(role.user.id),
},
)
role.user.record_event(
tag=EventTag.Account.TeamRoleRemove,
request=self.request,
additional={
"submitted_by_user_id": str(self.request.user.id),
"organization_name": self.team.organization.name,
"team_name": self.team.name,
"role_name": role.role_name.value,
},
)
# Send notification emails.
owner_and_manager_users = set(
organization_owners(self.request, self.team.organization)
+ organization_managers(self.request, self.team.organization)
)
owner_and_manager_users.discard(role.user)
send_team_member_removed_email(
self.request,
owner_and_manager_users,
user=role.user,
submitter=self.request.user,
organization_name=self.team.organization.name,
team_name=self.team.name,
)
send_removed_as_team_member_email(
self.request,
role.user,
submitter=self.request.user,
organization_name=self.team.organization.name,
team_name=self.team.name,
)
# Display notification message.
self.request.session.flash("Removed from team", queue="success")
# Refresh teams list.
return HTTPSeeOther(
self.request.route_path(
"manage.team.roles",
organization_name=self.team.organization.normalized_name,
team_name=self.team.normalized_name,
)
)
@view_config(
route_name="manage.team.history",
context=Team,
renderer="warehouse:templates/manage/team/history.html",
uses_session=True,
permission=Permissions.OrganizationTeamsManage,
has_translations=True,
)
def manage_team_history(team, request):
try:
page_num = int(request.params.get("page", 1))
except ValueError:
raise HTTPBadRequest("'page' must be an integer.")
events_query = (
request.db.query(Team.Event)
.join(Team.Event.source)
.filter(Team.Event.source_id == team.id)
.order_by(Team.Event.time.desc())
.order_by(Team.Event.tag.desc())
)
events = SQLAlchemyORMPage(
events_query,
page=page_num,
items_per_page=25,
url_maker=paginate_url_factory(request),
)
if events.page_count and page_num > events.page_count:
raise HTTPNotFound
user_service = request.find_service(IUserService, context=None)
return {
"events": events,
"get_user": user_service.get_user,
"team": team,
}
@view_config(
route_name="manage.project.change_team_project_role",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
has_translations=True,
require_reauth=True,
)
def change_team_project_role(project, request, _form_class=ChangeTeamProjectRoleForm):
form = _form_class(request.POST)
if form.validate():
role_id = request.POST["role_id"]
try:
role = (
request.db.query(TeamProjectRole)
.join(Team)
.filter(
TeamProjectRole.id == role_id, TeamProjectRole.project == project
)
.one()
)
if (
role.role_name == TeamProjectRoleType.Owner
and request.user in role.team.members
and request.user not in role.team.organization.owners
):
request.session.flash(
"Cannot remove your own team as Owner",
queue="error",
)
else:
# Add journal entry.
request.db.add(
JournalEntry(
name=project.name,
action="change {} {} to {}".format(
role.role_name.value,
role.team.name,
form.team_project_role_name.data.value,
),
submitted_by=request.user,
)
)
# Change team project role.
role.role_name = form.team_project_role_name.data
# Record events.
project.record_event(
tag=EventTag.Project.TeamProjectRoleChange,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"role_name": role.role_name.value,
"target_team": role.team.name,
},
)
role.team.organization.record_event(
tag=EventTag.Organization.TeamProjectRoleChange,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": role.project.name,
"role_name": role.role_name.value,
"target_team": role.team.name,
},
)
role.team.record_event(
tag=EventTag.Team.TeamProjectRoleChange,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": role.project.name,
"role_name": role.role_name.value,
},
)
# Send notification emails.
member_users = set(role.team.members)
owner_users = set(project.owners + role.team.organization.owners)
owner_users -= member_users
send_team_collaborator_role_changed_email(
request,
owner_users,
team=role.team,
submitter=request.user,
project_name=project.name,
role=role.role_name.value,
)
send_role_changed_as_team_collaborator_email(
request,
member_users,
team=role.team,
submitter=request.user,
project_name=project.name,
role=role.role_name.value,
)
# Display notification message.
request.session.flash("Changed permissions", queue="success")
except NoResultFound:
request.session.flash("Could not find permissions", queue="error")
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
@view_config(
route_name="manage.project.delete_team_project_role",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
has_translations=True,
require_reauth=True,
)
def delete_team_project_role(project, request):
try:
role = (
request.db.query(TeamProjectRole)
.join(Team)
.filter(TeamProjectRole.project == project)
.filter(TeamProjectRole.id == request.POST["role_id"])
.one()
)
removing_self = (
role.role_name == TeamProjectRoleType.Owner
and request.user in role.team.members
and request.user not in role.team.organization.owners
)
if removing_self:
request.session.flash("Cannot remove your own team as Owner", queue="error")
else:
role_name = role.role_name
team = role.team
# Delete role.
request.db.delete(role)
# Add journal entry.
request.db.add(
JournalEntry(
name=project.name,
action=f"remove {role_name.value} {team.name}",
submitted_by=request.user,
)
)
# Record event.
project.record_event(
tag=EventTag.Project.TeamProjectRoleRemove,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"role_name": role_name.value,
"target_team": team.name,
},
)
team.organization.record_event(
tag=EventTag.Organization.TeamProjectRoleRemove,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": project.name,
"role_name": role_name.value,
"target_team": team.name,
},
)
team.record_event(
tag=EventTag.Team.TeamProjectRoleRemove,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": project.name,
"role_name": role_name.value,
},
)
# Send notification emails.
member_users = set(team.members)
owner_users = set(project.owners + team.organization.owners)
owner_users -= member_users
send_team_collaborator_removed_email(
request,
owner_users,
team=role.team,
submitter=request.user,
project_name=project.name,
)
send_removed_as_team_collaborator_email(
request,
member_users,
team=role.team,
submitter=request.user,
project_name=project.name,
)
# Display notification message.
request.session.flash("Removed permissions", queue="success")
except NoResultFound:
request.session.flash("Could not find permissions", queue="error")
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
| ManageTeamRolesViews |
python | uqfoundation__dill | dill/tests/test_classdef.py | {
"start": 623,
"end": 723
} | class ____(object):
def _method(self):
pass
def ok(self):
return True
| _newclass |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 9928,
"end": 27519
} | class ____(TestCase):
def message_for(self, instance, schema, *args, **kwargs):
cls = kwargs.pop("cls", validators._LATEST_VERSION)
cls.check_schema(schema)
validator = cls(schema, *args, **kwargs)
errors = list(validator.iter_errors(instance))
self.assertTrue(errors, msg=f"No errors were raised for {instance!r}")
self.assertEqual(
len(errors),
1,
msg=f"Expected exactly one error, found {errors!r}",
)
return errors[0].message
def test_single_type_failure(self):
message = self.message_for(instance=1, schema={"type": "string"})
self.assertEqual(message, "1 is not of type 'string'")
def test_single_type_list_failure(self):
message = self.message_for(instance=1, schema={"type": ["string"]})
self.assertEqual(message, "1 is not of type 'string'")
def test_multiple_type_failure(self):
types = "string", "object"
message = self.message_for(instance=1, schema={"type": list(types)})
self.assertEqual(message, "1 is not of type 'string', 'object'")
def test_object_with_named_type_failure(self):
schema = {"type": [{"name": "Foo", "minimum": 3}]}
message = self.message_for(
instance=1,
schema=schema,
cls=validators.Draft3Validator,
)
self.assertEqual(message, "1 is not of type 'Foo'")
def test_minimum(self):
message = self.message_for(instance=1, schema={"minimum": 2})
self.assertEqual(message, "1 is less than the minimum of 2")
def test_maximum(self):
message = self.message_for(instance=1, schema={"maximum": 0})
self.assertEqual(message, "1 is greater than the maximum of 0")
def test_dependencies_single_element(self):
depend, on = "bar", "foo"
schema = {"dependencies": {depend: on}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft3Validator,
)
self.assertEqual(message, "'foo' is a dependency of 'bar'")
def test_object_without_title_type_failure_draft3(self):
type = {"type": [{"minimum": 3}]}
message = self.message_for(
instance=1,
schema={"type": [type]},
cls=validators.Draft3Validator,
)
self.assertEqual(
message,
"1 is not of type {'type': [{'minimum': 3}]}",
)
def test_dependencies_list_draft3(self):
depend, on = "bar", "foo"
schema = {"dependencies": {depend: [on]}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft3Validator,
)
self.assertEqual(message, "'foo' is a dependency of 'bar'")
def test_dependencies_list_draft7(self):
depend, on = "bar", "foo"
schema = {"dependencies": {depend: [on]}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft7Validator,
)
self.assertEqual(message, "'foo' is a dependency of 'bar'")
def test_additionalItems_single_failure(self):
message = self.message_for(
instance=[2],
schema={"items": [], "additionalItems": False},
cls=validators.Draft3Validator,
)
self.assertIn("(2 was unexpected)", message)
def test_additionalItems_multiple_failures(self):
message = self.message_for(
instance=[1, 2, 3],
schema={"items": [], "additionalItems": False},
cls=validators.Draft3Validator,
)
self.assertIn("(1, 2, 3 were unexpected)", message)
def test_additionalProperties_single_failure(self):
additional = "foo"
schema = {"additionalProperties": False}
message = self.message_for(instance={additional: 2}, schema=schema)
self.assertIn("('foo' was unexpected)", message)
def test_additionalProperties_multiple_failures(self):
schema = {"additionalProperties": False}
message = self.message_for(
instance=dict.fromkeys(["foo", "bar"]),
schema=schema,
)
self.assertIn(repr("foo"), message)
self.assertIn(repr("bar"), message)
self.assertIn("were unexpected)", message)
def test_const(self):
schema = {"const": 12}
message = self.message_for(
instance={"foo": "bar"},
schema=schema,
)
self.assertIn("12 was expected", message)
def test_contains_draft_6(self):
schema = {"contains": {"const": 12}}
message = self.message_for(
instance=[2, {}, []],
schema=schema,
cls=validators.Draft6Validator,
)
self.assertEqual(
message,
"None of [2, {}, []] are valid under the given schema",
)
def test_invalid_format_default_message(self):
checker = FormatChecker(formats=())
checker.checks("thing")(lambda value: False)
schema = {"format": "thing"}
message = self.message_for(
instance="bla",
schema=schema,
format_checker=checker,
)
self.assertIn(repr("bla"), message)
self.assertIn(repr("thing"), message)
self.assertIn("is not a", message)
def test_additionalProperties_false_patternProperties(self):
schema = {"type": "object",
"additionalProperties": False,
"patternProperties": {
"^abc$": {"type": "string"},
"^def$": {"type": "string"},
}}
message = self.message_for(
instance={"zebra": 123},
schema=schema,
cls=validators.Draft4Validator,
)
self.assertEqual(
message,
"{} does not match any of the regexes: {}, {}".format(
repr("zebra"), repr("^abc$"), repr("^def$"),
),
)
message = self.message_for(
instance={"zebra": 123, "fish": 456},
schema=schema,
cls=validators.Draft4Validator,
)
self.assertEqual(
message,
"{}, {} do not match any of the regexes: {}, {}".format(
repr("fish"), repr("zebra"), repr("^abc$"), repr("^def$"),
),
)
def test_False_schema(self):
message = self.message_for(
instance="something",
schema=False,
)
self.assertEqual(message, "False schema does not allow 'something'")
def test_multipleOf(self):
message = self.message_for(
instance=3,
schema={"multipleOf": 2},
)
self.assertEqual(message, "3 is not a multiple of 2")
def test_minItems(self):
message = self.message_for(instance=[], schema={"minItems": 2})
self.assertEqual(message, "[] is too short")
def test_maxItems(self):
message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 2})
self.assertEqual(message, "[1, 2, 3] is too long")
def test_minItems_1(self):
message = self.message_for(instance=[], schema={"minItems": 1})
self.assertEqual(message, "[] should be non-empty")
def test_maxItems_0(self):
message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 0})
self.assertEqual(message, "[1, 2, 3] is expected to be empty")
def test_minLength(self):
message = self.message_for(
instance="",
schema={"minLength": 2},
)
self.assertEqual(message, "'' is too short")
def test_maxLength(self):
message = self.message_for(
instance="abc",
schema={"maxLength": 2},
)
self.assertEqual(message, "'abc' is too long")
def test_minLength_1(self):
message = self.message_for(instance="", schema={"minLength": 1})
self.assertEqual(message, "'' should be non-empty")
def test_maxLength_0(self):
message = self.message_for(instance="abc", schema={"maxLength": 0})
self.assertEqual(message, "'abc' is expected to be empty")
def test_minProperties(self):
message = self.message_for(instance={}, schema={"minProperties": 2})
self.assertEqual(message, "{} does not have enough properties")
def test_maxProperties(self):
message = self.message_for(
instance={"a": {}, "b": {}, "c": {}},
schema={"maxProperties": 2},
)
self.assertEqual(
message,
"{'a': {}, 'b': {}, 'c': {}} has too many properties",
)
def test_minProperties_1(self):
message = self.message_for(instance={}, schema={"minProperties": 1})
self.assertEqual(message, "{} should be non-empty")
def test_maxProperties_0(self):
message = self.message_for(
instance={1: 2},
schema={"maxProperties": 0},
)
self.assertEqual(message, "{1: 2} is expected to be empty")
def test_prefixItems_with_items(self):
message = self.message_for(
instance=[1, 2, "foo"],
schema={"items": False, "prefixItems": [{}, {}]},
)
self.assertEqual(
message,
"Expected at most 2 items but found 1 extra: 'foo'",
)
def test_prefixItems_with_multiple_extra_items(self):
message = self.message_for(
instance=[1, 2, "foo", 5],
schema={"items": False, "prefixItems": [{}, {}]},
)
self.assertEqual(
message,
"Expected at most 2 items but found 2 extra: ['foo', 5]",
)
def test_pattern(self):
message = self.message_for(
instance="bbb",
schema={"pattern": "^a*$"},
)
self.assertEqual(message, "'bbb' does not match '^a*$'")
def test_does_not_contain(self):
message = self.message_for(
instance=[],
schema={"contains": {"type": "string"}},
)
self.assertEqual(
message,
"[] does not contain items matching the given schema",
)
def test_contains_too_few(self):
message = self.message_for(
instance=["foo", 1],
schema={"contains": {"type": "string"}, "minContains": 2},
)
self.assertEqual(
message,
"Too few items match the given schema "
"(expected at least 2 but only 1 matched)",
)
def test_contains_too_few_both_constrained(self):
message = self.message_for(
instance=["foo", 1],
schema={
"contains": {"type": "string"},
"minContains": 2,
"maxContains": 4,
},
)
self.assertEqual(
message,
"Too few items match the given schema (expected at least 2 but "
"only 1 matched)",
)
def test_contains_too_many(self):
message = self.message_for(
instance=["foo", "bar", "baz"],
schema={"contains": {"type": "string"}, "maxContains": 2},
)
self.assertEqual(
message,
"Too many items match the given schema (expected at most 2)",
)
def test_contains_too_many_both_constrained(self):
message = self.message_for(
instance=["foo"] * 5,
schema={
"contains": {"type": "string"},
"minContains": 2,
"maxContains": 4,
},
)
self.assertEqual(
message,
"Too many items match the given schema (expected at most 4)",
)
def test_exclusiveMinimum(self):
message = self.message_for(
instance=3,
schema={"exclusiveMinimum": 5},
)
self.assertEqual(
message,
"3 is less than or equal to the minimum of 5",
)
def test_exclusiveMaximum(self):
message = self.message_for(instance=3, schema={"exclusiveMaximum": 2})
self.assertEqual(
message,
"3 is greater than or equal to the maximum of 2",
)
def test_required(self):
message = self.message_for(instance={}, schema={"required": ["foo"]})
self.assertEqual(message, "'foo' is a required property")
def test_dependentRequired(self):
message = self.message_for(
instance={"foo": {}},
schema={"dependentRequired": {"foo": ["bar"]}},
)
self.assertEqual(message, "'bar' is a dependency of 'foo'")
def test_oneOf_matches_none(self):
message = self.message_for(instance={}, schema={"oneOf": [False]})
self.assertEqual(
message,
"{} is not valid under any of the given schemas",
)
def test_oneOf_matches_too_many(self):
message = self.message_for(instance={}, schema={"oneOf": [True, True]})
self.assertEqual(message, "{} is valid under each of True, True")
def test_unevaluated_items(self):
schema = {"type": "array", "unevaluatedItems": False}
message = self.message_for(instance=["foo", "bar"], schema=schema)
self.assertIn(
message,
"Unevaluated items are not allowed ('foo', 'bar' were unexpected)",
)
def test_unevaluated_items_on_invalid_type(self):
schema = {"type": "array", "unevaluatedItems": False}
message = self.message_for(instance="foo", schema=schema)
self.assertEqual(message, "'foo' is not of type 'array'")
def test_unevaluated_properties_invalid_against_subschema(self):
schema = {
"properties": {"foo": {"type": "string"}},
"unevaluatedProperties": {"const": 12},
}
message = self.message_for(
instance={
"foo": "foo",
"bar": "bar",
"baz": 12,
},
schema=schema,
)
self.assertEqual(
message,
"Unevaluated properties are not valid under the given schema "
"('bar' was unevaluated and invalid)",
)
def test_unevaluated_properties_disallowed(self):
schema = {"type": "object", "unevaluatedProperties": False}
message = self.message_for(
instance={
"foo": "foo",
"bar": "bar",
},
schema=schema,
)
self.assertEqual(
message,
"Unevaluated properties are not allowed "
"('bar', 'foo' were unexpected)",
)
def test_unevaluated_properties_on_invalid_type(self):
schema = {"type": "object", "unevaluatedProperties": False}
message = self.message_for(instance="foo", schema=schema)
self.assertEqual(message, "'foo' is not of type 'object'")
def test_single_item(self):
schema = {"prefixItems": [{}], "items": False}
message = self.message_for(
instance=["foo", "bar", "baz"],
schema=schema,
)
self.assertEqual(
message,
"Expected at most 1 item but found 2 extra: ['bar', 'baz']",
)
def test_heterogeneous_additionalItems_with_Items(self):
schema = {"items": [{}], "additionalItems": False}
message = self.message_for(
instance=["foo", "bar", 37],
schema=schema,
cls=validators.Draft7Validator,
)
self.assertEqual(
message,
"Additional items are not allowed ('bar', 37 were unexpected)",
)
def test_heterogeneous_items_prefixItems(self):
schema = {"prefixItems": [{}], "items": False}
message = self.message_for(
instance=["foo", "bar", 37],
schema=schema,
)
self.assertEqual(
message,
"Expected at most 1 item but found 2 extra: ['bar', 37]",
)
def test_heterogeneous_unevaluatedItems_prefixItems(self):
schema = {"prefixItems": [{}], "unevaluatedItems": False}
message = self.message_for(
instance=["foo", "bar", 37],
schema=schema,
)
self.assertEqual(
message,
"Unevaluated items are not allowed ('bar', 37 were unexpected)",
)
def test_heterogeneous_properties_additionalProperties(self):
"""
Not valid deserialized JSON, but this should not blow up.
"""
schema = {"properties": {"foo": {}}, "additionalProperties": False}
message = self.message_for(
instance={"foo": {}, "a": "baz", 37: 12},
schema=schema,
)
self.assertEqual(
message,
"Additional properties are not allowed (37, 'a' were unexpected)",
)
def test_heterogeneous_properties_unevaluatedProperties(self):
"""
Not valid deserialized JSON, but this should not blow up.
"""
schema = {"properties": {"foo": {}}, "unevaluatedProperties": False}
message = self.message_for(
instance={"foo": {}, "a": "baz", 37: 12},
schema=schema,
)
self.assertEqual(
message,
"Unevaluated properties are not allowed (37, 'a' were unexpected)",
)
| TestValidationErrorMessages |
python | spyder-ide__spyder | spyder/plugins/remoteclient/widgets/connectionpages.py | {
"start": 30643,
"end": 43339
} | class ____(BaseConnectionPage):
"""Page to receive SSH credentials for a remote connection."""
MAX_WIDTH = 600 if MAC else 580
LOAD_FROM_CONFIG = False
NEW_CONNECTION = True
# ---- SidebarPage API
# -------------------------------------------------------------------------
def get_name(self):
return _("New connection")
def setup_page(self):
# Attributes
self.env_method_group = QButtonGroup(self)
self._radio_buttons_to_info_widgets: dict[
CreateEnvMethods, QWidget
] = {}
# Widgets
self.ssh_info_widget = self.create_ssh_connection_info_widget()
jupyterhub_info_widget = self.create_jupyterhub_connection_info_widget()
if ENV_MANAGER:
self.env_creation_widget = self._create_env_creation_widget()
self.env_packages_widget = self._create_env_packages_widget()
# Use a stacked widget/layout so we can hide the current widgets and
# create new ones in case users want to introduce more connections.
self.ssh_widget = QStackedWidget(self)
self.ssh_widget.addWidget(self.ssh_info_widget)
if ENV_MANAGER:
self.ssh_widget.addWidget(self.env_creation_widget)
self.ssh_widget.addWidget(self.env_packages_widget)
self.jupyterhub_widget = QWidget(self)
jupyterhub_layout = QStackedLayout()
jupyterhub_layout.addWidget(jupyterhub_info_widget)
self.jupyterhub_widget.setLayout(jupyterhub_layout)
self.create_tab("SSH", self.ssh_widget)
self.create_tab("JupyterHub", self.jupyterhub_widget)
def get_icon(self):
return self.create_icon("add_server")
# ---- SpyderConfigPage API
# -------------------------------------------------------------------------
def save_to_conf(self):
super().save_to_conf()
if self.NEW_CONNECTION:
# Set the client type for new connections following current tab
# index
client_type = self.get_client_type()
self.set_option(f"{self.host_id}/client_type", client_type)
if client_type == ClientType.JupyterHub:
# Set correct auth_method option following client type detected
self.set_option(
f"{self.host_id}/auth_method",
AuthenticationMethod.JupyterHub,
)
# ---- Public API
# -------------------------------------------------------------------------
def reset_page(self, clear=False):
"""Reset page to allow users to introduce a new connection."""
# Set a new host id
self.host_id = str(uuid.uuid4())
if clear:
# Reset tracked widgets
self.reset_widget_dicts()
# Add a new, clean set of widgets to the page
ssh_clean_info_widget = self.create_ssh_connection_info_widget()
self.ssh_widget.layout().addWidget(ssh_clean_info_widget)
self.ssh_widget.layout().setCurrentWidget(ssh_clean_info_widget)
jupyterhub_clean_info_widget = (
self.create_jupyterhub_connection_info_widget()
)
self.jupyterhub_widget.layout().addWidget(
jupyterhub_clean_info_widget
)
self.jupyterhub_widget.layout().setCurrentWidget(
jupyterhub_clean_info_widget
)
else:
# Change option names associated to all widgets present in the page
# to reference the new host_id
for widgets in [self.comboboxes, self.lineedits, self.spinboxes]:
for widget in widgets:
section, option, default = widgets[widget]
new_option = "/".join(
[self.host_id] + option.split("/")[1:]
)
widgets[widget] = (section, new_option, default)
def get_current_tab(self, index: int | None = None) -> str:
if index is None:
index = self.tabs.currentIndex()
if index == 0:
return "SSH"
else:
return "JupyterHub"
def show_ssh_info_widget(self):
self.ssh_widget.setCurrentWidget(self.ssh_info_widget)
def show_env_creation_widget(self):
self.ssh_widget.setCurrentWidget(self.env_creation_widget)
def show_env_packages_widget(self):
self.ssh_widget.setCurrentWidget(self.env_packages_widget)
def is_ssh_info_widget_shown(self) -> bool:
return self.ssh_widget.currentWidget() == self.ssh_info_widget
def is_env_creation_widget_shown(self) -> bool:
return self.ssh_widget.currentWidget() == self.env_creation_widget
def is_env_packages_widget_shown(self) -> bool:
return self.ssh_widget.currentWidget() == self.env_packages_widget
def selected_env_creation_method(self) -> CreateEnvMethods:
return self.env_method_group.checkedId()
def validate_env_creation(self):
method_id = self.env_method_group.checkedId()
if method_id != CreateEnvMethods.NoEnv:
env_method_widget = self._radio_buttons_to_info_widgets[method_id]
if env_method_widget.validate_contents(env_names=[]):
return True
else:
return False
return True
def get_create_env_info(self):
method_id = self.env_method_group.checkedId()
env_method_widget = self._radio_buttons_to_info_widgets[method_id]
if method_id == CreateEnvMethods.NewEnv:
return (
env_method_widget.get_env_name(),
env_method_widget.get_python_version()
)
elif method_id == CreateEnvMethods.ImportEnv:
return (
env_method_widget.get_zip_file(),
env_method_widget.get_env_name()
)
def get_env_packages_list(self):
return self._packages_info.get_changed_packages()
def setup_env_packages_widget(self):
env_name, python_version = self.get_create_env_info()
self._packages_info.setup(
env_name,
python_version,
f"~/.envs-manager/backends/pixi/{env_name}"
)
# ---- Private API
# -------------------------------------------------------------------------
def _create_env_creation_widget(self):
# Intro text
intro_label = QLabel(
_("Create a Python environment on the remote host")
)
intro_tip_text = _(
"Decide whether you want to create a remote environment to run "
"your code and how to do it"
)
intro_tip = TipWidget(
tip_text=intro_tip_text,
icon=ima.icon('info_tip'),
hover_icon=ima.icon('info_tip_hover'),
size=AppStyle.ConfigPageIconSize + 2,
wrap_text=True,
)
# Increase font size to make it more relevant
font = self.get_font(SpyderFontType.Interface)
font.setPointSize(font.pointSize() + 1)
intro_label.setFont(font)
# Layout
intro_layout = QHBoxLayout()
intro_layout.setContentsMargins(0, 0, 0, 0)
intro_layout.setSpacing(0)
intro_layout.setAlignment(Qt.AlignCenter)
intro_layout.addWidget(intro_label)
intro_layout.addWidget(intro_tip)
# Available methods
methods_group = QGroupBox(_("Available methods"))
self.env_method_group.idToggled.connect(
self._on_env_creation_method_changed
)
new_env_radio = self.create_radiobutton(
_("Create a new environment"),
option=None,
button_group=self.env_method_group,
id_=CreateEnvMethods.NewEnv,
)
import_env_radio = self.create_radiobutton(
_("Import an existing environment"),
option=None,
button_group=self.env_method_group,
id_=CreateEnvMethods.ImportEnv,
)
no_env_radio = self.create_radiobutton(
_("Don't create an environment"),
option=None,
button_group=self.env_method_group,
id_=CreateEnvMethods.NoEnv,
)
methods_layout = QVBoxLayout()
methods_layout.addSpacing(3)
methods_layout.addWidget(new_env_radio)
methods_layout.addWidget(import_env_radio)
methods_layout.addWidget(no_env_radio)
methods_group.setLayout(methods_layout)
# Required info
info_group = QGroupBox(_("Required information"))
new_env_info = NewEnvironment(
self,
max_width_for_content=470,
show_in_remote_connections_dialog=True
)
new_env_info.setMaximumWidth(470)
import_env_info = NewEnvironment(
self,
max_width_for_content=470,
import_env=True,
show_in_remote_connections_dialog=True
)
import_env_info.setMaximumWidth(470)
no_env_info = QLabel(
_(
"You can set up an environment later by going to the menu "
"entry <i>Tools > Environment manager</i>."
)
)
no_env_info.setWordWrap(True)
info_layout = QVBoxLayout()
info_layout.addWidget(new_env_info)
info_layout.addWidget(import_env_info)
info_layout.addWidget(no_env_info)
info_group.setLayout(info_layout)
# Hide all info widgets to only show the one that's checked
for widget in [new_env_info, import_env_info, no_env_info]:
widget.setVisible(False)
# Use the following mapping to show/hide info widgets when the
# corresponding radio button is toggled
self._radio_buttons_to_info_widgets = {
CreateEnvMethods.NewEnv: new_env_info,
CreateEnvMethods.ImportEnv: import_env_info,
CreateEnvMethods.NoEnv: no_env_info,
}
# Set new env as the default method
new_env_radio.radiobutton.setChecked(True)
# Final layout
layout = QVBoxLayout()
layout.setContentsMargins(
3 * AppStyle.MarginSize, 0, 3 * AppStyle.MarginSize, 0
)
layout.addLayout(intro_layout)
layout.addSpacing(8 * AppStyle.MarginSize)
layout.addWidget(methods_group)
layout.addWidget(info_group)
layout.addStretch()
env_creation_widget = QWidget(self)
env_creation_widget.setLayout(layout)
return env_creation_widget
def _on_env_creation_method_changed(
self, id_: CreateEnvMethods, checked: bool
):
self._radio_buttons_to_info_widgets[id_].setVisible(checked)
def _create_env_packages_widget(self):
# Intro text
intro_label = QLabel(_("Select packages for your remote environment"))
intro_tip_text = _(
"Choose the packages you want to install in your remote Python "
"environment"
)
intro_tip = TipWidget(
tip_text=intro_tip_text,
icon=ima.icon("info_tip"),
hover_icon=ima.icon("info_tip_hover"),
size=AppStyle.ConfigPageIconSize + 2,
wrap_text=True,
)
# Increase font size to make it more relevant
font = self.get_font(SpyderFontType.Interface)
font.setPointSize(font.pointSize() + 1)
intro_label.setFont(font)
# Layout
intro_layout = QHBoxLayout()
intro_layout.setContentsMargins(0, 0, 0, 0)
intro_layout.setSpacing(0)
intro_layout.setAlignment(Qt.AlignCenter)
intro_layout.addWidget(intro_label)
intro_layout.addWidget(intro_tip)
self._packages_info = EditEnvironment(
self, show_in_remote_connections_dialog=True
)
self._packages_info.set_empty_message_visible(True)
self._packages_info.setMaximumWidth(
525 if MAC else (485 if WIN else 500)
)
# Final layout
layout = QVBoxLayout()
layout.setContentsMargins(
3 * AppStyle.MarginSize,
0,
3 * AppStyle.MarginSize,
# Add bottom margin to let the packages table take the available
# vertical space
(2 if MAC else (3 if WIN else 4)) * AppStyle.MarginSize,
)
layout.addLayout(intro_layout)
layout.addSpacing(8 * AppStyle.MarginSize)
layout.addWidget(self._packages_info)
env_packages_widget = QWidget(self)
env_packages_widget.setLayout(layout)
return env_packages_widget
| NewConnectionPage |
python | sympy__sympy | sympy/series/sequences.py | {
"start": 31972,
"end": 35543
} | class ____(SeqExprOp):
r"""Represents term-wise multiplication of sequences.
Explanation
===========
Handles multiplication of sequences only. For multiplication
with other objects see :func:`SeqBase.coeff_mul`.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything \* :class:`EmptySequence` returns :class:`EmptySequence`.
* Other rules are defined in ``_mul`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2))
SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqMul(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**5, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqAdd
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqMul):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
# Multiplication of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqMul.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify a :class:`SeqMul` using known rules.
Explanation
===========
Iterates through all pairs and ask the constituent
sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._mul(t)
# This returns None if s does not know how to multiply
# with t. Returns the newly multiplied sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqMul(args, evaluate=False)
def _eval_coeff(self, pt):
"""multiplies the coefficients of all the sequences at point pt"""
val = 1
for a in self.args:
val *= a.coeff(pt)
return val
| SeqMul |
python | cython__cython | Cython/Compiler/Main.py | {
"start": 25666,
"end": 26708
} | class ____:
"""
Results from the Cython compiler:
c_file string or None The generated C source file
h_file string or None The generated C header file
i_file string or None The generated .pxi file
api_file string or None The generated C API .h file
listing_file string or None File of error messages
object_file string or None Result of compiling the C file
extension_file string or None Result of linking the object file
num_errors integer Number of compilation errors
compilation_source CompilationSource
"""
c_file = None
h_file = None
i_file = None
api_file = None
listing_file = None
object_file = None
extension_file = None
main_source_file = None
num_errors = 0
def get_generated_source_files(self):
return [
source_file for source_file in [self.c_file, self.h_file, self.i_file, self.api_file]
if source_file
]
| CompilationResult |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_emails_confirm.py | {
"start": 324,
"end": 8254
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-emails-confirm"
method = "post"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
@mock.patch("sentry.users.models.user.User.send_confirm_email_singular")
def test_can_confirm(self, send_confirm_email: mock.MagicMock) -> None:
email = UserEmail.objects.create(email="bar@example.com", is_verified=False, user=self.user)
email.save()
self.get_success_response(self.user.id, email="bar@example.com", status_code=204)
send_confirm_email.assert_called_once_with(UserEmail.objects.get(email="bar@example.com"))
@mock.patch("sentry.users.models.user.User.send_confirm_email_singular")
def test_can_confirm_with_uppercase(self, send_confirm_email: mock.MagicMock) -> None:
email = UserEmail.objects.create(email="Bar@example.com", is_verified=False, user=self.user)
email.save()
self.get_success_response(self.user.id, email="Bar@example.com", status_code=204)
send_confirm_email.assert_called_once_with(UserEmail.objects.get(email="Bar@example.com"))
@mock.patch("sentry.users.models.user.User.send_confirm_email_singular")
def test_cant_confirm_verified_email(self, send_confirm_email: mock.MagicMock) -> None:
email = UserEmail.objects.create(email="bar@example.com", is_verified=True, user=self.user)
email.save()
self.get_error_response(self.user.id, email="bar@example.com", status_code=400)
assert send_confirm_email.call_count == 0
@mock.patch("sentry.users.models.user.User.send_confirm_email_singular")
def test_validate_email(self, send_confirm_email: mock.MagicMock) -> None:
self.get_error_response(self.user.id, email="", status_code=400)
assert send_confirm_email.call_count == 0
@override_options(
{
"user-settings.signed-url-confirmation-emails": True,
"user-settings.signed-url-confirmation-emails-salt": "signed-url-confirmation-emails-salt",
}
)
def test_confirm_email_signed_url(self) -> None:
from sentry import options
EMAIL_CONFIRMATION_SALT = options.get("user-settings.signed-url-confirmation-emails-salt")
self.login_as(self.user)
new_email = "newemailfromsignedurl@example.com"
signed_data = sign(
user_id=self.user.id,
email=new_email,
salt=EMAIL_CONFIRMATION_SALT,
)
signed_url = reverse("sentry-account-confirm-signed-email", args=[signed_data])
resp = self.client.get(signed_url, follow=True)
assert resp.status_code == 200
assert resp.redirect_chain == [(reverse("sentry-account-settings-emails"), 302)]
new_user_email = UserEmail.objects.get(user=self.user, email=new_email)
assert new_user_email.is_verified
messages = list(resp.context["messages"])
assert len(messages) == 1
assert messages[0].message == "Thanks for confirming your email"
@override_options(
{
"user-settings.signed-url-confirmation-emails": True,
"user-settings.signed-url-confirmation-emails-salt": "signed-url-confirmation-emails-salt",
}
)
def test_confirm_email_invalid_signed_url(self) -> None:
self.login_as(self.user)
new_email = "newemailfromsignedurl@example.com"
signed_data = sign(
user_id=self.user.id,
email=new_email,
salt="invalid-salt",
)
signed_url = reverse("sentry-account-confirm-signed-email", args=[signed_data])
resp = self.client.get(signed_url, follow=True)
assert resp.status_code == 200
assert resp.redirect_chain == [(reverse("sentry-account-settings-emails"), 302)]
# the email should not be added
user_email_counts = UserEmail.objects.filter(user=self.user).count()
assert user_email_counts == 1
messages = list(resp.context["messages"])
assert len(messages) == 1
assert (
messages[0].message
== "There was an error confirming your email. Please try again or visit your Account Settings to resend the verification email."
)
@override_options(
{
"user-settings.signed-url-confirmation-emails": True,
"user-settings.signed-url-confirmation-emails-salt": "signed-url-confirmation-emails-salt",
}
)
def test_confirm_email_already_verified(self) -> None:
from sentry import options
EMAIL_CONFIRMATION_SALT = options.get("user-settings.signed-url-confirmation-emails-salt")
self.login_as(self.user)
new_email = "newemailfromsignedurl@example.com"
# Create already verified email
UserEmail.objects.create(
user=self.user,
email=new_email,
is_verified=True,
)
signed_data = sign(
user_id=self.user.id,
email=new_email,
salt=EMAIL_CONFIRMATION_SALT,
)
signed_url = reverse("sentry-account-confirm-signed-email", args=[signed_data])
resp = self.client.get(signed_url, follow=True)
assert resp.status_code == 200
assert resp.redirect_chain == [(reverse("sentry-account-settings-emails"), 302)]
messages = list(resp.context["messages"])
assert len(messages) == 1
assert (
messages[0].message == "The email you are trying to verify has already been verified."
)
@override_options(
{
"user-settings.signed-url-confirmation-emails": True,
"user-settings.signed-url-confirmation-emails-salt": "signed-url-confirmation-emails-salt",
}
)
def test_confirm_email_expired_signature(self) -> None:
from datetime import timedelta
from django.utils import timezone
self.login_as(self.user)
new_email = "newemailfromsignedurl@example.com"
with mock.patch("django.core.signing.time.time") as mock_time:
past_time = timezone.now() - timedelta(days=7)
mock_time.return_value = past_time.timestamp()
signed_data = sign(
user_id=self.user.id,
email=new_email,
salt="signed-url-confirmation-emails-salt",
)
signed_url = reverse("sentry-account-confirm-signed-email", args=[signed_data])
resp = self.client.get(signed_url, follow=True)
assert resp.status_code == 200
assert resp.redirect_chain == [(reverse("sentry-account-settings-emails"), 302)]
messages = list(resp.context["messages"])
assert len(messages) == 1
assert (
messages[0].message
== "The confirmation link has expired. Please visit your Account Settings to resend the verification email."
)
@override_options(
{
"user-settings.signed-url-confirmation-emails": False,
"user-settings.signed-url-confirmation-emails-salt": "signed-url-confirmation-emails-salt",
}
)
def test_confirm_email_signed_urls_disabled(self) -> None:
self.login_as(self.user)
new_email = "newemailfromsignedurl@example.com"
signed_data = sign(
user_id=self.user.id,
email=new_email,
salt="signed-url-confirmation-emails-salt",
)
resp = self.client.get(
reverse("sentry-account-confirm-signed-email", args=[signed_data]), follow=True
)
assert resp.status_code == 200
messages = list(resp.context["messages"])
assert len(messages) == 1
assert (
messages[0].message
== "There was an error confirming your email. Please try again or visit your Account Settings to resend the verification email."
)
| UserEmailsConfirmTest |
python | pytorch__pytorch | test/onnx/model_defs/emb_seq.py | {
"start": 350,
"end": 658
} | class ____(nn.Module):
def __init__(self, in_space=10, dim=3):
super().__init__()
self.embedding = nn.Embedding(in_space, dim)
self.seq = nn.Sequential(self.embedding, nn.Linear(dim, 1), nn.Sigmoid())
def forward(self, indices):
return self.seq(indices)
| EmbeddingNetwork2 |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | {
"start": 3184,
"end": 4521
} | class ____(Benchmark):
r"""
Deb 3 objective function.
This class defines the Deb 3 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Deb03}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi
\left ( x_i^{3/4} - 0.05 \right) \right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0.0`. The number of global minima is
:math:`5^n` that are evenly spaced in the function landscape, where
:math:`n` represents the dimension of the problem.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
# lower limit changed to zero because of fractional power
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.93388314, 0.68141781]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
return -(1.0 / self.N) * sum(sin(5 * pi * (x ** 0.75 - 0.05)) ** 6.0)
| Deb03 |
python | huggingface__transformers | src/transformers/modeling_gguf_pytorch_utils.py | {
"start": 4669,
"end": 6368
} | class ____(TensorProcessor):
def __init__(self, config=None):
super().__init__(config=config)
def process(self, weights, name, **kwargs):
if "attn_qkv" in name:
num_heads = self.config["n_head"]
n_embed = self.config["hidden_size"]
if "weight" in name:
weights = self._reverse_reshape_weights(weights, num_heads, n_embed)
else:
weights = self._reverse_reshape_bias(weights, num_heads, n_embed)
return GGUFTensor(weights, name, {})
def _reverse_reshape_weights(self, weights: np.ndarray, n_head: int, n_embed: int):
# Original reshape implementation
# https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L972-L985
q, k, v = np.array_split(weights, 3, axis=0)
q = q.reshape(n_head, n_embed // n_head, n_embed)
k = k.reshape(n_head, n_embed // n_head, n_embed)
v = v.reshape(n_head, n_embed // n_head, n_embed)
qkv_weights = np.stack([q, k, v], axis=1)
return qkv_weights.reshape(n_head * 3 * (n_embed // n_head), n_embed)
def _reverse_reshape_bias(self, weights: np.ndarray, n_head: int, n_embed: int):
# Original reshape implementation
# https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L986-L998
q_bias, k_bias, v_bias = np.array_split(weights, 3)
q_bias = q_bias.reshape(n_head, n_embed // n_head)
k_bias = k_bias.reshape(n_head, n_embed // n_head)
v_bias = v_bias.reshape(n_head, n_embed // n_head)
qkv_bias = np.stack([q_bias, k_bias, v_bias], axis=1).flatten()
return qkv_bias
| BloomTensorProcessor |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/util_test.py | {
"start": 28412,
"end": 29301
} | class ____(FillTriangularTest):
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
with self.cached_session() as sess:
static_shape = None if use_deferred_shape else x_.shape
x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
- array_ops.stop_gradient(x_pl * (x_pl - 1.)))
x = x_pl + zeros_like_x_pl
actual = du.fill_triangular(x, **kwargs)
inverse_actual = du.fill_triangular_inverse(actual, **kwargs)
inverse_actual_ = sess.run(
inverse_actual,
feed_dict={x_pl: x_})
if use_deferred_shape:
self.assertEqual(None, inverse_actual.shape)
else:
self.assertAllEqual(x_.shape, inverse_actual.shape)
self.assertAllEqual(x_, inverse_actual_)
| FillTriangularInverseTest |
python | wandb__wandb | wandb/automations/actions.py | {
"start": 2289,
"end": 2514
} | class ____(GQLBase):
typename__: Annotated[
Literal["GenericWebhookIntegration"],
Field(alias="__typename", frozen=True, repr=False),
] = "GenericWebhookIntegration"
id: GQLId
| _WebhookIntegrationStub |
python | pytest-dev__pytest | testing/test_tmpdir.py | {
"start": 11136,
"end": 13872
} | class ____:
PREFIX = "fun-"
def test_make(self, tmp_path):
for i in range(10):
d = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
assert d.name.startswith(self.PREFIX)
assert d.name.endswith(str(i))
symlink = tmp_path.joinpath(self.PREFIX + "current")
if symlink.exists():
# unix
assert symlink.is_symlink()
assert symlink.resolve() == d.resolve()
def test_cleanup_lock_create(self, tmp_path):
d = tmp_path.joinpath("test")
d.mkdir()
lockfile = create_cleanup_lock(d)
with pytest.raises(OSError, match=r"cannot create lockfile in .*"):
create_cleanup_lock(d)
lockfile.unlink()
def test_lock_register_cleanup_removal(self, tmp_path: Path) -> None:
lock = create_cleanup_lock(tmp_path)
registry: list[Callable[..., None]] = []
register_cleanup_lock_removal(lock, register=registry.append)
(cleanup_func,) = registry
assert lock.is_file()
cleanup_func(original_pid="intentionally_different")
assert lock.is_file()
cleanup_func()
assert not lock.exists()
cleanup_func()
assert not lock.exists()
def _do_cleanup(self, tmp_path: Path, keep: int = 2) -> None:
self.test_make(tmp_path)
cleanup_numbered_dir(
root=tmp_path,
prefix=self.PREFIX,
keep=keep,
consider_lock_dead_if_created_before=0,
)
def test_cleanup_keep(self, tmp_path):
self._do_cleanup(tmp_path)
a, b = (x for x in tmp_path.iterdir() if not x.is_symlink())
print(a, b)
def test_cleanup_keep_0(self, tmp_path: Path):
self._do_cleanup(tmp_path, 0)
dir_num = len(list(tmp_path.iterdir()))
assert dir_num == 0
def test_cleanup_locked(self, tmp_path):
p = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
create_cleanup_lock(p)
assert not pathlib.ensure_deletable(
p, consider_lock_dead_if_created_before=p.stat().st_mtime - 1
)
assert pathlib.ensure_deletable(
p, consider_lock_dead_if_created_before=p.stat().st_mtime + 1
)
def test_cleanup_ignores_symlink(self, tmp_path):
the_symlink = tmp_path / (self.PREFIX + "current")
attempt_symlink_to(the_symlink, tmp_path / (self.PREFIX + "5"))
self._do_cleanup(tmp_path)
def test_removal_accepts_lock(self, tmp_path):
folder = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
create_cleanup_lock(folder)
maybe_delete_a_numbered_dir(folder)
assert folder.is_dir()
| TestNumberedDir |
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/base.py | {
"start": 1017,
"end": 1148
} | class ____(Enum):
SEQUENCE = auto()
OPTIONAL = auto()
DICT = auto()
_DERIVED_MODEL_REGISTRY = {}
@public
| _TypeContainer |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 14861,
"end": 16034
} | class ____(type_spec.TypeSpec):
"""Type specification for a `PerReplica`."""
__slots__ = ["_value_specs"]
value_type = property(lambda self: PerReplica)
def __init__(self, *value_specs):
self._value_specs = tuple(value_specs)
def _serialize(self):
return self._value_specs
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
replica_context = distribute_lib.get_replica_context()
if replica_context is not None and replica_context.num_replicas_in_sync > 1:
raise ValueError(
"Flattening a PerReplica to components is not supported in replica "
"context.")
return value._values # pylint: disable=protected-access
def _from_components(self, tensor_list):
return PerReplica(tensor_list)
nested_structure_coder.register_codec(
nested_structure_coder.BuiltInTypeSpecCodec(
PerReplicaSpec, struct_pb2.TypeSpecProto.PER_REPLICA_SPEC
)
)
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
| PerReplicaSpec |
python | pandas-dev__pandas | pandas/tests/series/test_ufunc.py | {
"start": 8384,
"end": 14988
} | class ____:
# TODO: cases with NAs, axis kwarg for DataFrame
def test_multiply(self, values_for_np_reduce, box_with_array, request):
box = box_with_array
values = values_for_np_reduce
with tm.assert_produces_warning(None):
obj = box(values)
if isinstance(values, pd.core.arrays.SparseArray):
mark = pytest.mark.xfail(reason="SparseArray has no 'prod'")
request.applymarker(mark)
if values.dtype.kind in "iuf":
result = np.multiply.reduce(obj)
if box is pd.DataFrame:
expected = obj.prod(numeric_only=False)
tm.assert_series_equal(result, expected)
elif box is pd.Index:
# Index has no 'prod'
expected = obj._values.prod()
assert result == expected
else:
expected = obj.prod()
assert result == expected
else:
msg = "|".join(
[
"does not support operation",
"unsupported operand type",
"ufunc 'multiply' cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
np.multiply.reduce(obj)
def test_add(self, values_for_np_reduce, box_with_array):
box = box_with_array
values = values_for_np_reduce
with tm.assert_produces_warning(None):
obj = box(values)
if values.dtype.kind in "miuf":
result = np.add.reduce(obj)
if box is pd.DataFrame:
expected = obj.sum(numeric_only=False)
tm.assert_series_equal(result, expected)
elif box is pd.Index:
# Index has no 'sum'
expected = obj._values.sum()
assert result == expected
else:
expected = obj.sum()
assert result == expected
else:
msg = "|".join(
[
"does not support operation",
"unsupported operand type",
"ufunc 'add' cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
np.add.reduce(obj)
def test_max(self, values_for_np_reduce, box_with_array):
box = box_with_array
values = values_for_np_reduce
same_type = True
if box is pd.Index and values.dtype.kind in ["i", "f"]:
# ATM Index casts to object, so we get python ints/floats
same_type = False
with tm.assert_produces_warning(None):
obj = box(values)
result = np.maximum.reduce(obj)
if box is pd.DataFrame:
# TODO: cases with axis kwarg
expected = obj.max(numeric_only=False)
tm.assert_series_equal(result, expected)
else:
expected = values[1]
assert result == expected
if same_type:
# check we have e.g. Timestamp instead of dt64
assert type(result) == type(expected)
def test_min(self, values_for_np_reduce, box_with_array):
box = box_with_array
values = values_for_np_reduce
same_type = True
if box is pd.Index and values.dtype.kind in ["i", "f"]:
# ATM Index casts to object, so we get python ints/floats
same_type = False
with tm.assert_produces_warning(None):
obj = box(values)
result = np.minimum.reduce(obj)
if box is pd.DataFrame:
expected = obj.min(numeric_only=False)
tm.assert_series_equal(result, expected)
else:
expected = values[0]
assert result == expected
if same_type:
# check we have e.g. Timestamp instead of dt64
assert type(result) == type(expected)
@pytest.mark.parametrize("type_", [list, deque, tuple])
def test_binary_ufunc_other_types(type_):
a = pd.Series([1, 2, 3], name="name")
b = type_([3, 4, 5])
result = np.add(a, b)
expected = pd.Series(np.add(a.to_numpy(), b), name="name")
tm.assert_series_equal(result, expected)
def test_object_dtype_ok():
class Thing:
def __init__(self, value) -> None:
self.value = value
def __add__(self, other):
other = getattr(other, "value", other)
return type(self)(self.value + other)
def __eq__(self, other) -> bool:
return type(other) is Thing and self.value == other.value
def __repr__(self) -> str:
return f"Thing({self.value})"
s = pd.Series([Thing(1), Thing(2)])
result = np.add(s, Thing(1))
expected = pd.Series([Thing(2), Thing(3)])
tm.assert_series_equal(result, expected)
def test_outer():
# https://github.com/pandas-dev/pandas/issues/27186
ser = pd.Series([1, 2, 3])
obj = np.array([1, 2, 3])
with pytest.raises(NotImplementedError, match="^$"):
np.subtract.outer(ser, obj)
def test_np_matmul():
# GH26650
df1 = pd.DataFrame(data=[[-1, 1, 10]])
df2 = pd.DataFrame(data=[-1, 1, 10])
expected = pd.DataFrame(data=[102])
result = np.matmul(df1, df2)
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("box", [pd.Index, pd.Series])
def test_np_matmul_1D(box):
result = np.matmul(box([1, 2]), box([2, 3]))
assert result == 8
assert isinstance(result, np.int64)
def test_array_ufuncs_for_many_arguments():
# GH39853
def add3(x, y, z):
return x + y + z
ufunc = np.frompyfunc(add3, 3, 1)
ser = pd.Series([1, 2])
result = ufunc(ser, ser, 1)
expected = pd.Series([3, 5], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame([[1, 2]])
msg = (
"Cannot apply ufunc <ufunc 'add3 (vectorized)'> "
"to mixed DataFrame and Series inputs."
)
with pytest.raises(NotImplementedError, match=re.escape(msg)):
ufunc(ser, ser, df)
def test_np_trunc():
# This used to test np.fix, which is not a ufunc but is composed of
# several ufunc calls under the hood with `out` and `where` keywords. But numpy
# is deprecating that (or at least discussing deprecating) in favor of np.trunc,
# which _is_ a ufunc without the out keyword usage.
ser = pd.Series([-1.5, -0.5, 0.5, 1.5])
result = np.trunc(ser)
expected = pd.Series([-1.0, -0.0, 0.0, 1.0])
tm.assert_series_equal(result, expected)
| TestNumpyReductions |
python | wandb__wandb | wandb/vendor/pygments/lexers/configs.py | {
"start": 7539,
"end": 9669
} | class ____(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
| Cfengine3Lexer |
python | openai__openai-python | src/openai/types/responses/response_code_interpreter_tool_call_param.py | {
"start": 812,
"end": 1723
} | class ____(TypedDict, total=False):
id: Required[str]
"""The unique ID of the code interpreter tool call."""
code: Required[Optional[str]]
"""The code to run, or null if not available."""
container_id: Required[str]
"""The ID of the container used to run the code."""
outputs: Required[Optional[Iterable[Output]]]
"""
The outputs generated by the code interpreter, such as logs or images. Can be
null if no outputs are available.
"""
status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]]
"""The status of the code interpreter tool call.
Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and
`failed`.
"""
type: Required[Literal["code_interpreter_call"]]
"""The type of the code interpreter tool call. Always `code_interpreter_call`."""
| ResponseCodeInterpreterToolCallParam |
python | Netflix__metaflow | metaflow/plugins/azure/azure_credential.py | {
"start": 0,
"end": 2174
} | class ____(object):
name = "azure-default"
@staticmethod
def create_cacheable_azure_credential(*args, **kwargs):
"""azure.identity.DefaultAzureCredential is not readily cacheable in a dictionary
because it does not have a content based hash and equality implementations.
We implement a subclass CacheableDefaultAzureCredential to add them.
We need this because credentials will be part of the cache key in _ClientCache.
"""
from azure.identity import DefaultAzureCredential
class CacheableDefaultAzureCredential(DefaultAzureCredential):
def __init__(self, *args, **kwargs):
super(CacheableDefaultAzureCredential, self).__init__(*args, **kwargs)
# Just hashing all the kwargs works because they are all individually
# hashable as of 7/15/2022.
#
# What if Azure adds unhashable things to kwargs?
# - We will have CI to catch this (it will always install the latest Azure SDKs)
# - In Metaflow usage today we never specify any kwargs anyway. (see last line
# of the outer function.
self._hash_code = hash((args, tuple(sorted(kwargs.items()))))
def __hash__(self):
return self._hash_code
def __eq__(self, other):
return hash(self) == hash(other)
return CacheableDefaultAzureCredential(*args, **kwargs)
cached_provider_class = None
def create_cacheable_azure_credential():
global cached_provider_class
if cached_provider_class is None:
from metaflow.metaflow_config import DEFAULT_AZURE_CLIENT_PROVIDER
from metaflow.plugins import AZURE_CLIENT_PROVIDERS
for p in AZURE_CLIENT_PROVIDERS:
if p.name == DEFAULT_AZURE_CLIENT_PROVIDER:
cached_provider_class = p
break
else:
raise ValueError(
"Cannot find Azure Client provider %s" % DEFAULT_AZURE_CLIENT_PROVIDER
)
return cached_provider_class.create_cacheable_azure_credential()
| AzureDefaultClientProvider |
python | allegroai__clearml | clearml/utilities/pigar/modules.py | {
"start": 539,
"end": 1692
} | class ____(Modules):
def __init__(self) -> None:
super(ImportedModules, self).__init__()
def add(self, name: str, file: str, lineno: int) -> None:
if name is None:
return
names = list()
special_name = ".".join(name.split(".")[:2])
# Flask extension.
if name.startswith("flask.ext."):
names.append("flask")
names.append("flask_" + name.split(".")[2])
# Special cases..
elif special_name in _special_cases:
names.append(_special_cases[special_name])
# Other.
elif "." in name and not name.startswith("."):
names.append(name.split(".")[0])
else:
names.append(name)
for nm in names:
if nm not in self:
self[nm] = _Locations()
self[nm].add(file, lineno)
def __or__(self, obj: "ImportedModules") -> "ImportedModules":
for name, locations in obj.items():
for file, linenos in locations.items():
for lineno in linenos:
self.add(name, file, lineno)
return self
| ImportedModules |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 51199,
"end": 52853
} | class ____:
def test_dict_values_resolve_to_additional_properties(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict(values=String())
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {"type": "object", "additionalProperties": {"type": "string"}}
def test_dict_with_empty_values_field(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict()
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {"type": "object", "additionalProperties": {}}
def test_dict_with_empty_values_field_and_metadata(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict(metadata={"additionalProperties": True})
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {"type": "object", "additionalProperties": True}
def test_dict_with_nested(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict(values=Nested(PetSchema))
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
assert len(get_schemas(spec)) == 2
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {
"additionalProperties": build_ref(spec, "schema", "Pet"),
"type": "object",
}
| TestDictValues |
python | doocs__leetcode | solution/1500-1599/1505.Minimum Possible Integer After at Most K Adjacent Swaps On Digits/Solution.py | {
"start": 445,
"end": 1148
} | class ____:
def minInteger(self, num: str, k: int) -> str:
pos = defaultdict(deque)
for i, v in enumerate(num, 1):
pos[int(v)].append(i)
ans = []
n = len(num)
tree = BinaryIndexedTree(n)
for i in range(1, n + 1):
for v in range(10):
q = pos[v]
if q:
j = q[0]
dist = tree.query(n) - tree.query(j) + j - i
if dist <= k:
k -= dist
q.popleft()
ans.append(str(v))
tree.update(j, 1)
break
return ''.join(ans)
| Solution |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 9776,
"end": 10671
} | class ____:
"""Test es_ES currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.es_ES import Provider as EsEsCurrencyProvider
cls.provider = EsEsCurrencyProvider
cls.currencies = cls.provider.currencies
cls.currency_codes, cls.currency_names = tuple(zip(*cls.currencies))
def test_currency(self, faker, num_samples):
for _ in range(num_samples):
cur = faker.currency()
assert cur in self.currencies
def test_currency_name(self, faker, num_samples):
for _ in range(num_samples):
name = faker.currency_name()
assert name in self.currency_names
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestEsEs |
python | facebook__pyre-check | client/commands/pyre_language_server.py | {
"start": 17962,
"end": 26619
} | class ____:
"""
The dispatcher provides the top-level, "foreground" logic for a Pyre
language server. Its only job is to read requests from standard input,
parse them, and dispatch to the appropriate lower-level logic.
There are two compontents to which we might dispatch:
- We'll dispatch to the PyreLanguageServer for all request handling,
which includes querying the daemon, sending responses to the client,
and reporting telemetry.
- We also may check that the background task used to start/restart the
daemon and get type error notifications over subscriptions is alive.
The daemon can go down, for example if a critical file change occurs,
so it is important for us to periodically check whether it is up.
"""
# I/O channels. Output channel is used *exclusively* to report parse errors.
input_channel: connections.AsyncTextReader
output_channel: connections.AsyncTextWriter
# State: used *exclusively* to track restart failures.
server_state: state.ServerState
daemon_manager: background_tasks.TaskManager
api: PyreLanguageServerApi
# A set of outstanding (not "done") asyncio tasks (like requests being processed). This is necessary to retain strong references to those tasks
# to avoid them being collected mid-execution by gc. See https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
outstanding_tasks: Set[asyncio.Task[None]]
def __init__(
self,
input_channel: connections.AsyncTextReader,
output_channel: connections.AsyncTextWriter,
server_state: state.ServerState,
daemon_manager: background_tasks.TaskManager,
api: PyreLanguageServerApi,
) -> None:
self.input_channel = input_channel
self.output_channel = output_channel
self.server_state = server_state
self.daemon_manager = daemon_manager
self.api = api
self.outstanding_tasks = set()
async def wait_for_exit(self) -> commands.ExitCode:
await _wait_for_exit(self.input_channel, self.output_channel)
return commands.ExitCode.SUCCESS
async def _restart_if_needed(
self, error_source: Optional[Exception] = None
) -> None:
if (
self.server_state.consecutive_start_failure
>= CONSECUTIVE_START_ATTEMPT_THRESHOLD
):
LOG.info(
"Not restarting Pyre since failed consecutive start attempt limit"
" has been reached."
)
return
if isinstance(
error_source,
(
connections.ConnectionFailure,
asyncio.IncompleteReadError,
ConnectionError,
),
): # do we think the daemon is probably down at this point?
# Terminate any existing daemon processes before starting a new one
LOG.info("Forcing pyre daemon restart...")
await self.daemon_manager.ensure_task_stop() # make sure it's down
# restart if needed
if not self.daemon_manager.is_task_running():
# Just check to ensure that the daemon is running and restart if needed
await self.daemon_manager.ensure_task_running()
async def dispatch_nonblocking_request(self, request: json_rpc.Request) -> None:
if request.method == "exit" or request.method == "shutdown":
raise Exception("Exit and shutdown requests should be blocking")
dispatch_request_timer = timer.Timer()
await self._restart_if_needed()
dispatch_request_duration = ( # noqa: F841
dispatch_request_timer.stop_in_millisecond()
)
if self.server_state.client_register_event is not None:
await self.server_state.client_register_event.wait()
elif request.method == "textDocument/didOpen":
await self.api.process_open_request(
lsp.DidOpenTextDocumentParameters.from_json_rpc_parameters(
request.extract_parameters()
),
request.activity_key,
)
await self._restart_if_needed()
elif request.method == "textDocument/didChange":
await self.api.process_did_change_request(
lsp.DidChangeTextDocumentParameters.from_json_rpc_parameters(
request.extract_parameters()
)
)
await self._restart_if_needed()
elif request.method == "textDocument/didClose":
await self.api.process_close_request(
lsp.DidCloseTextDocumentParameters.from_json_rpc_parameters(
request.extract_parameters()
)
)
elif request.method == "textDocument/didSave":
await self.api.process_did_save_request(
lsp.DidSaveTextDocumentParameters.from_json_rpc_parameters(
request.extract_parameters()
),
request.activity_key,
)
await self._restart_if_needed()
elif request.method == "textDocument/typeCoverage":
await self.api.process_type_coverage_request(
lsp.TypeCoverageParameters.from_json_rpc_parameters(
request.extract_parameters()
),
request.id,
request.activity_key,
)
elif request.id is not None:
raise lsp.RequestCancelledError(
f"{request.method} Request not supported yet"
)
async def dispatch_request(
self, request: json_rpc.Request
) -> Optional[commands.ExitCode]:
"""
The top-level request dispatcher has two parts:
- Forward the request to the appropriate handler method
- For some types of requests, check that the background task is running; this
is how we ensure the daemon connection is live (the background task will
crash if the daemon goes down and closes the socket).
"""
if request.method == "exit":
LOG.info(
"Received exit request without a shutdown request, exiting as FAILURE."
)
return commands.ExitCode.LANGUAGE_SERVER_EXIT
elif request.method == "shutdown":
await self.api.process_shutdown_request(request.id)
return await self.wait_for_exit()
else:
request_task = asyncio.create_task(
self.dispatch_nonblocking_request(request)
)
self.outstanding_tasks.add(request_task)
request_task.add_done_callback(self.outstanding_tasks.discard)
async def serve_requests(self) -> int:
while True:
request = await read_lsp_request(self.input_channel, self.output_channel)
LOG.debug(f"Received LSP request: {log.truncate(str(request), 400)}")
try:
return_code = await self.dispatch_request(request)
if return_code is not None:
return return_code
except json_rpc.JSONRPCException as json_rpc_error:
LOG.error(
f"Exception occurred while processing request: {json_rpc_error}"
)
await lsp.write_json_rpc_ignore_connection_error(
self.output_channel,
json_rpc.ErrorResponse(
id=request.id,
activity_key=request.activity_key,
code=json_rpc_error.error_code(),
message=str(json_rpc_error),
),
)
async def run(self) -> int:
"""
Launch the background tasks that deal with starting and subscribing
to a pyre server and managing a queue of requests, then run the
language server itself.
"""
try:
await self.daemon_manager.ensure_task_running()
return await self.serve_requests()
except lsp.ReadChannelClosedError:
# This error can happen when the connection gets closed unilaterally
# from the language client, which causes issue when we try to access the
# input channel. This usually signals that the language client has exited,
# which implies that the language server should do that as well.
LOG.info("Connection closed by LSP client.")
return commands.ExitCode.SUCCESS
finally:
await self.daemon_manager.ensure_task_stop()
| PyreLanguageServerDispatcher |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_with.py | {
"start": 3222,
"end": 6446
} | class ____(__TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError1(self):
with torch._dynamo.error_on_graph_break(False):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaisesRegex(TypeError, 'the context manager', fooLacksEnter)
def testEnterAttributeError2(self):
with torch._dynamo.error_on_graph_break(False):
class LacksEnterAndExit(object):
pass
def fooLacksEnterAndExit():
foo = LacksEnterAndExit()
with foo: pass
self.assertRaisesRegex(TypeError, 'the context manager', fooLacksEnterAndExit)
def testExitAttributeError(self):
with torch._dynamo.error_on_graph_break(False):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaisesRegex(TypeError, 'the context manager.*__exit__', fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
with torch._dynamo.error_on_graph_break(False):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
# Ruff complains that we're redefining `self.foo` here,
# but the whole point of the test is to check that `self.foo`
# is *not* redefined (because `__enter__` raises)
with ct as self.foo: # noqa: F811
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
with torch._dynamo.error_on_graph_break(False):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
| FailureTestCase |
python | Textualize__textual | src/textual/scrollbar.py | {
"start": 645,
"end": 756
} | class ____(Message, bubble=False):
"""Base class for all scrollbar messages."""
@rich.repr.auto
| ScrollMessage |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modular_lfm2_moe.py | {
"start": 9753,
"end": 9883
} | class ____(LlamaForCausalLM):
pass
__all__ = ["Lfm2MoeForCausalLM", "Lfm2MoeModel", "Lfm2MoePreTrainedModel"]
| Lfm2MoeForCausalLM |
python | tox-dev__tox | src/tox/tox_env/package.py | {
"start": 1367,
"end": 3842
} | class ____(ToxEnv, ABC):
def __init__(self, create_args: ToxEnvCreateArgs) -> None:
self._thread_lock = RLock()
self._file_lock: FileLock | None = None
super().__init__(create_args)
self._envs: set[str] = set()
def __getattribute__(self, name: str) -> Any:
# the packaging class might be used by multiple environments in parallel, hold a lock for operations on it
obj = object.__getattribute__(self, name)
if isinstance(obj, MethodType):
obj = _lock_method(self._thread_lock, self._file_lock, obj)
return obj
def register_config(self) -> None:
super().register_config()
file_lock_path: Path = self.conf["env_dir"] / "file.lock"
self._file_lock = FileLock(file_lock_path)
file_lock_path.parent.mkdir(parents=True, exist_ok=True)
self.core.add_config(
keys=["package_root", "setupdir"],
of_type=Path,
default=cast("Path", self.core["tox_root"]),
desc="indicates where the packaging root file exists (historically setup.py file or pyproject.toml now)",
)
self.conf.add_config(
keys=["package_root", "setupdir"],
of_type=Path,
default=cast("Path", self.core["package_root"]),
desc="indicates where the packaging root file exists (historically setup.py file or pyproject.toml now)",
)
def _recreate_default(self, conf: Config, value: str | None) -> bool:
return self.options.no_recreate_pkg is False and super()._recreate_default(conf, value)
@abstractmethod
def perform_packaging(self, for_env: EnvConfigSet) -> list[Package]:
raise NotImplementedError
def register_run_env(self, run_env: RunToxEnv) -> Generator[tuple[str, str], PackageToxEnv, None]: # noqa: ARG002, PLR6301
yield from () # empty generator by default
def mark_active_run_env(self, run_env: RunToxEnv) -> None:
self._envs.add(run_env.conf.name)
def teardown_env(self, conf: EnvConfigSet) -> None:
if conf.name in self._envs:
# conf.name (".tox") may be missing in self._envs in the case of an automatically provisioned environment
self._envs.remove(conf.name)
if len(self._envs) == 0:
self._teardown()
@abstractmethod
def child_pkg_envs(self, run_conf: EnvConfigSet) -> Iterator[PackageToxEnv]:
raise NotImplementedError
| PackageToxEnv |
python | django__django | tests/decorators/test_vary.py | {
"start": 210,
"end": 1360
} | class ____(SimpleTestCase):
def test_wrapped_sync_function_is_not_coroutine_function(self):
def sync_view(request):
return HttpResponse()
wrapped_view = vary_on_headers()(sync_view)
self.assertIs(iscoroutinefunction(wrapped_view), False)
def test_wrapped_async_function_is_coroutine_function(self):
async def async_view(request):
return HttpResponse()
wrapped_view = vary_on_headers()(async_view)
self.assertIs(iscoroutinefunction(wrapped_view), True)
def test_vary_on_headers_decorator(self):
@vary_on_headers("Header", "Another-header")
def sync_view(request):
return HttpResponse()
response = sync_view(HttpRequest())
self.assertEqual(response.get("Vary"), "Header, Another-header")
async def test_vary_on_headers_decorator_async_view(self):
@vary_on_headers("Header", "Another-header")
async def async_view(request):
return HttpResponse()
response = await async_view(HttpRequest())
self.assertEqual(response.get("Vary"), "Header, Another-header")
| VaryOnHeadersTests |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 797,
"end": 1192
} | class ____(_BaseSpiderMiddleware):
def process_spider_exception(self, response, exception):
self.crawler.spider.logger.info(
"Middleware: %s exception caught", exception.__class__.__name__
)
return [
{"from": "process_spider_exception"},
Request(response.url, meta={"dont_fail": True}, dont_filter=True),
]
| RecoveryMiddleware |
python | realpython__materials | python-class/person.py | {
"start": 327,
"end": 537
} | class ____:
def __init__(self, name):
self.name = name
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value.upper()
| Person |
python | altair-viz__altair | altair/vegalite/v6/theme.py | {
"start": 2605,
"end": 3823
} | class ____:
"""Implementation of a builtin vega theme."""
def __init__(self, theme: str) -> None:
self.theme = theme
def __call__(self) -> ThemeConfig:
return {
"usermeta": {"embedOptions": {"theme": self.theme}},
"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}},
}
def __repr__(self) -> str:
return f"VegaTheme({self.theme!r})"
# The entry point group that can be used by other packages to declare other
# themes that will be auto-detected. Explicit registration is also
# allowed by the PluginRegistry API.
ENTRY_POINT_GROUP: Final = "altair.vegalite.v6.theme"
# NOTE: `themes` def has an entry point group
themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
themes.register(
"default",
lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}},
)
themes.register(
"opaque",
lambda: {
"config": {
"background": "white",
"view": {"continuousWidth": 300, "continuousHeight": 300},
}
},
)
themes.register("none", ThemeConfig)
for theme in VEGA_THEMES:
themes.register(theme, VegaTheme(theme))
themes.enable("default")
| VegaTheme |
python | apache__airflow | providers/openai/src/airflow/providers/openai/operators/openai.py | {
"start": 3468,
"end": 7230
} | class ____(BaseOperator):
"""
Operator that triggers an OpenAI Batch API endpoint and waits for the batch to complete.
:param file_id: Required. The ID of the batch file to trigger.
:param endpoint: Required. The OpenAI Batch API endpoint to trigger.
:param conn_id: Optional. The OpenAI connection ID to use. Defaults to 'openai_default'.
:param deferrable: Optional. Run operator in the deferrable mode.
:param wait_seconds: Optional. Number of seconds between checks. Only used when ``deferrable`` is False.
Defaults to 3 seconds.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Only used when ``deferrable`` is False. Defaults to 24 hour, which is the SLA for OpenAI Batch API.
:param wait_for_completion: Optional. Whether to wait for the batch to complete. If set to False, the operator
will return immediately after triggering the batch. Defaults to True.
.. seealso::
For more information on how to use this operator, please take a look at the guide:
:ref:`howto/operator:OpenAITriggerBatchOperator`
"""
template_fields: Sequence[str] = ("file_id",)
def __init__(
self,
file_id: str,
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
conn_id: str = OpenAIHook.default_conn_name,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
wait_seconds: float = 3,
timeout: float = 24 * 60 * 60,
wait_for_completion: bool = True,
**kwargs: Any,
):
super().__init__(**kwargs)
self.conn_id = conn_id
self.file_id = file_id
self.endpoint = endpoint
self.deferrable = deferrable
self.wait_seconds = wait_seconds
self.timeout = timeout
self.wait_for_completion = wait_for_completion
self.batch_id: str | None = None
@cached_property
def hook(self) -> OpenAIHook:
"""Return an instance of the OpenAIHook."""
return OpenAIHook(conn_id=self.conn_id)
def execute(self, context: Context) -> str | None:
batch = self.hook.create_batch(file_id=self.file_id, endpoint=self.endpoint)
self.batch_id = batch.id
if self.wait_for_completion:
if self.deferrable:
self.defer(
timeout=self.execution_timeout,
trigger=OpenAIBatchTrigger(
conn_id=self.conn_id,
batch_id=self.batch_id,
poll_interval=60,
end_time=time.time() + self.timeout,
),
method_name="execute_complete",
)
else:
self.log.info("Waiting for batch %s to complete", self.batch_id)
self.hook.wait_for_batch(self.batch_id, wait_seconds=self.wait_seconds, timeout=self.timeout)
return self.batch_id
def execute_complete(self, context: Context, event: Any = None) -> str:
"""
Invoke this callback when the trigger fires; return immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
if event["status"] == "error":
raise OpenAIBatchJobException(event["message"])
self.log.info("%s completed successfully.", self.task_id)
return event["batch_id"]
def on_kill(self) -> None:
"""Cancel the batch if task is cancelled."""
if self.batch_id:
self.log.info("on_kill: cancel the OpenAI Batch %s", self.batch_id)
self.hook.cancel_batch(self.batch_id)
| OpenAITriggerBatchOperator |
python | ray-project__ray | rllib/algorithms/algorithm_config.py | {
"start": 319329,
"end": 320356
} | class ____(str, Enum):
"""Enumerates schemes of what parts of the TorchLearner can be compiled.
This can be either the entire update step of the learner or only the forward
methods (and therein the forward_train method) of the RLModule.
.. note::
- torch.compiled code can become slow on graph breaks or even raise
errors on unsupported operations. Empirically, compiling
`forward_train` should introduce little graph breaks, raise no
errors but result in a speedup comparable to compiling the
complete update.
- Using `complete_update` is experimental and may result in errors.
"""
# Compile the entire update step of the learner.
# This includes the forward pass of the RLModule, the loss computation, and the
# optimizer step.
COMPLETE_UPDATE = "complete_update"
# Only compile the forward methods (and therein the forward_train method) of the
# RLModule.
FORWARD_TRAIN = "forward_train"
| TorchCompileWhatToCompile |
python | tiangolo__fastapi | scripts/people.py | {
"start": 2311,
"end": 2385
} | class ____(BaseModel):
data: DiscussionsResponseData
| DiscussionsResponse |
python | pennersr__django-allauth | allauth/headless/mfa/views.py | {
"start": 3931,
"end": 4589
} | class ____(AuthenticatedAPIView):
input_class = GenerateRecoveryCodesInput
def get(self, request, *args, **kwargs):
authenticator = recovery_codes_flows.view_recovery_codes(request)
if not authenticator:
return response.RecoveryCodesNotFoundResponse(request)
return response.RecoveryCodesResponse(request, authenticator)
def post(self, request, *args, **kwargs):
authenticator = recovery_codes_flows.generate_recovery_codes(request)
return response.RecoveryCodesResponse(request, authenticator)
def get_input_kwargs(self):
return {"user": self.request.user}
| ManageRecoveryCodesView |
python | doocs__leetcode | solution/0200-0299/0294.Flip Game II/Solution.py | {
"start": 0,
"end": 548
} | class ____:
def canWin(self, currentState: str) -> bool:
@cache
def dfs(mask):
for i in range(n - 1):
if (mask & (1 << i)) == 0 or (mask & (1 << (i + 1)) == 0):
continue
if dfs(mask ^ (1 << i) ^ (1 << (i + 1))):
continue
return True
return False
mask, n = 0, len(currentState)
for i, c in enumerate(currentState):
if c == '+':
mask |= 1 << i
return dfs(mask)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/the-employee-that-worked-on-the-longest-task.py | {
"start": 37,
"end": 322
} | class ____(object):
def hardestWorker(self, n, logs):
"""
:type n: int
:type logs: List[List[int]]
:rtype: int
"""
return logs[max(xrange(len(logs)), key=lambda x: (logs[x][1]-(logs[x-1][1] if x-1 >= 0 else 0), -logs[x][0]))][0]
| Solution |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 5284,
"end": 6184
} | class ____(PrefectOperatorFilterBaseModel):
"""Filter by `Flow.tags`."""
all_: Optional[list[str]] = Field(
default=None,
examples=[["tag-1", "tag-2"]],
description=(
"A list of tags. Flows will be returned only if their tags are a superset"
" of the list"
),
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only include flows without tags"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.all_ is not None:
filters.append(db.Flow.tags.has_all(_as_array(self.all_)))
if self.is_null_ is not None:
filters.append(db.Flow.tags == [] if self.is_null_ else db.Flow.tags != [])
return filters
| FlowFilterTags |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/spmd_test.py | {
"start": 5805,
"end": 93281
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super(DTensorSPMDTest, self).setUp()
self.skipForDeviceType(['TPU'],
'all tests require 8 TPU cores.',
unless_device_count_equals_to=8)
global_ids = test_util.create_device_ids_array((2, 4))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = dict()
for device in ('CPU', 'GPU', 'TPU'):
mesh_dict[device] = Mesh(
[_MESH_DIM_X, _MESH_DIM_Y],
global_ids,
local_ids,
test_util.create_device_list((2, 4), device),
use_xla_spmd=test_util.get_use_xla_spmd(device),
)
self.mesh = self.configTestMesh(mesh_dict)
# Creates a bunch of common layouts used by tests later.
# - 0-d
self.scalar_replicated_layout = Layout.replicated(self.mesh, rank=0)
# - 1-d
self.replicated_layout_1d = Layout.replicated(self.mesh, rank=1)
self.first_dimension_sharded_layout_1d = Layout.batch_sharded(
self.mesh, _MESH_DIM_X, rank=1)
# - 2-d
self.replicated_layout_2d = Layout.replicated(self.mesh, rank=2)
self.first_dimension_sharded_layout = Layout.batch_sharded(
self.mesh, _MESH_DIM_X, rank=2)
self.last_dimension_sharded_layout = Layout.inner_sharded(
self.mesh, _MESH_DIM_X, rank=2)
self.layouts_2d = [
self.replicated_layout_2d, self.first_dimension_sharded_layout,
self.last_dimension_sharded_layout
]
# - 3-d
self.replicated_layout_3d = Layout.replicated(self.mesh, rank=3)
self.first_dimension_sharded_layout_3d = Layout.batch_sharded(
self.mesh, _MESH_DIM_X, rank=3)
self.middle_dimension_sharded_layout_3d = Layout(
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED], self.mesh)
self.last_dimension_sharded_layout_3d = Layout.inner_sharded(
self.mesh, _MESH_DIM_X, rank=3)
self.layouts_3d = [
self.replicated_layout_3d, self.first_dimension_sharded_layout_3d,
self.middle_dimension_sharded_layout_3d,
self.last_dimension_sharded_layout_3d
]
self.shardings = {
'batch': Layout.batch_sharded,
'inner': Layout.inner_sharded
}
@parameterized.named_parameters(
('unsharded_unsharded', [layout_lib.UNSHARDED, layout_lib.UNSHARDED]),
('x_unsharded', [_MESH_DIM_X, layout_lib.UNSHARDED]),
('unsharded_x', [layout_lib.UNSHARDED, _MESH_DIM_X]),
('x,y', [_MESH_DIM_X, _MESH_DIM_Y]),
)
@mock.patch.dict(
os.environ, {'DTENSOR_ENABLE_REPLICATED_SPMD_AS_DEFAULT_TF.MOD': '1'}
)
def testDefaultReplicatedSpmd(self, shard_specs):
if test_util.is_gpu_present():
dtype = dtypes.int32
else:
dtype = dtypes.float32
x = stateless_random_ops.stateless_random_uniform(
shape=[4, 8], seed=[0, 1], maxval=7, dtype=dtype
)
y = constant_op.constant(7, dtype=dtype)
expected_result = math_ops.Mod(x=x, y=y)
expected_layout = Layout.replicated(self.mesh, rank=2)
dtensor_result = math_ops.Mod(
x=api.relayout(x, layout=Layout(shard_specs, self.mesh)),
y=api.relayout(y, layout=Layout([], self.mesh)),
)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.product(
shard_type=['replicated', 'batch_sharded'], full_matrices=[True, False])
def testQR(self, shard_type, full_matrices):
np.random.seed(123)
inputs = constant_op.constant(
np.random.normal(0.0, 1.0, 8 * 9 * 10).reshape([8, 9, 10]),
dtype=dtypes.float32)
expected_result = gen_linalg_ops.qr(
input=inputs, full_matrices=True, name=None)
if shard_type == 'replicated':
layout = self.first_dimension_sharded_layout_3d
else:
layout = self.replicated_layout_3d
inputs = api.relayout(inputs, layout)
got = gen_linalg_ops.qr(
input=inputs, full_matrices=full_matrices, name=None)
self.assertDTensorEqual(expected_result[0], layout, got[0])
self.assertDTensorEqual(expected_result[1], layout, got[1])
def testReduceScatter(self,):
# Generates an AllReduce due to sharding of inner dimensions of Matmul
# and a Scatter due to the Relayout. The AllReduce+Scatter can be combined
# to a single ReduceScatter.
a, b, c = 128, 128, 128
seed = [0, 1]
first_dim_sharded = self.first_dimension_sharded_layout
second_dim_sharded = self.last_dimension_sharded_layout
with api.default_mesh(self.mesh):
m1 = numpy_util.stateless_random_uniform(
layout=second_dim_sharded, shape=[a, b], seed=seed
)
m2 = numpy_util.stateless_random_uniform(
layout=first_dim_sharded, shape=[b, c], seed=seed
)
@polymorphic_function.function
def func():
m3 = math_ops.matmul(m1, m2)
return m3
@polymorphic_function.function
def scattered_func():
m3 = math_ops.matmul(m1, m2)
return api.relayout(m3, self.first_dimension_sharded_layout)
dtensor_result = func()
dtensor_scattered_result = scattered_func()
self.assertDTensorEqual(dtensor_result, self.first_dimension_sharded_layout,
dtensor_scattered_result)
def testReduceScatterLastDimSharded(
self,
):
# ReduceScatter on non-0th dimension which requires a transpose.
a, b, c = 128, 128, 128
seed = [0, 1]
first_dim_sharded = self.first_dimension_sharded_layout
second_dim_sharded = self.last_dimension_sharded_layout
@polymorphic_function.function
def uniform(shape, seed, layout):
return api.relayout(
stateless_random_ops.stateless_random_uniform(shape=shape, seed=seed),
layout=layout,
)
with api.default_mesh(self.mesh):
m1 = uniform(layout=second_dim_sharded, shape=[a, b], seed=seed)
m2 = uniform(layout=first_dim_sharded, shape=[b, c], seed=seed)
@polymorphic_function.function
def func():
m3 = math_ops.matmul(m1, m2)
return m3
@polymorphic_function.function
def scattered_func():
m3 = math_ops.matmul(m1, m2)
return api.relayout(m3, self.last_dimension_sharded_layout)
dtensor_result = func()
dtensor_scattered_result = scattered_func()
self.assertDTensorEqual(
dtensor_result,
self.last_dimension_sharded_layout,
dtensor_scattered_result,
)
@parameterized.named_parameters(
(
'xu_ux',
[_MESH_DIM_X, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'ux_xu',
[layout_lib.UNSHARDED, _MESH_DIM_X],
[_MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'yu_uy',
[_MESH_DIM_Y, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, _MESH_DIM_Y],
),
(
'uy_yu',
[layout_lib.UNSHARDED, _MESH_DIM_Y],
[_MESH_DIM_Y, layout_lib.UNSHARDED],
),
)
def testAllToAll2D(self, src_spec, tgt_spec):
a = constant_op.constant(
np.arange(
8 * 8,
).reshape((8, 8)),
dtype=dtypes.float32,
)
sharded_a = numpy_util.pack_numpy(a, layout=Layout(src_spec, self.mesh))
@polymorphic_function.function
def func(a):
return api.relayout(a, Layout(tgt_spec, self.mesh))
dtensor_result = func(sharded_a)
self.assertDTensorEqual(a, Layout(tgt_spec, self.mesh), dtensor_result)
@parameterized.named_parameters(
(
'yuu_uuy',
[_MESH_DIM_Y, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_Y],
),
(
'xuu_uux',
[_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'uux_xuu',
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_X],
[_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
),
(
'xuu_uxu',
[_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'uxu_xuu',
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
),
(
'xuy_uxy',
[_MESH_DIM_X, layout_lib.UNSHARDED, _MESH_DIM_Y],
[layout_lib.UNSHARDED, _MESH_DIM_X, _MESH_DIM_Y],
),
(
'uxy_xuy',
[layout_lib.UNSHARDED, _MESH_DIM_X, _MESH_DIM_Y],
[_MESH_DIM_X, layout_lib.UNSHARDED, _MESH_DIM_Y],
),
(
'xyu_uyx',
[_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, _MESH_DIM_Y, _MESH_DIM_X],
),
# Requires additional transpose
(
'uxu_uux',
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'uux_uxu',
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_X],
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'xyu_xuy',
[_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED, _MESH_DIM_Y],
),
(
'xuy_xyu',
[_MESH_DIM_X, layout_lib.UNSHARDED, _MESH_DIM_Y],
[_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
),
(
'yxu_yux',
[_MESH_DIM_Y, _MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_Y, layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'yux_yxu',
[_MESH_DIM_Y, layout_lib.UNSHARDED, _MESH_DIM_X],
[_MESH_DIM_Y, _MESH_DIM_X, layout_lib.UNSHARDED],
),
)
def testAllToAll3D(self, src_spec, tgt_spec):
a = constant_op.constant(
np.arange(8 * 8 * 8).reshape((8, 8, 8)), dtype=dtypes.float32
)
sharded_a = numpy_util.pack_numpy(a, layout=Layout(src_spec, self.mesh))
@polymorphic_function.function
def func(a):
return api.relayout(a, Layout(tgt_spec, self.mesh))
dtensor_result = func(sharded_a)
self.assertDTensorEqual(a, Layout(tgt_spec, self.mesh), dtensor_result)
def testExpandDimsDifferentInputAndOutputLayouts(self,):
src_numpy = np.random.uniform(size=[10, 10])
src = constant_op.constant(src_numpy, dtype=dtypes.float32)
expected = array_ops.expand_dims_v2(src, axis=-1)
src = api.relayout(src, self.replicated_layout_2d)
@polymorphic_function.function
def expand_dims_fn(src):
expanded = array_ops.expand_dims_v2(src, axis=-1)
return api.relayout(expanded, self.first_dimension_sharded_layout_3d)
dtensor_result = expand_dims_fn(src)
self.assertDTensorEqual(expected, self.first_dimension_sharded_layout_3d,
dtensor_result)
@polymorphic_function.function
def expand_dims_list_axis_fn(src):
expanded = array_ops.expand_dims_v2(src, axis=[-1])
return api.relayout(expanded, self.first_dimension_sharded_layout_3d)
dtensor_result_2 = expand_dims_list_axis_fn(src)
self.assertDTensorEqual(expected, self.first_dimension_sharded_layout_3d,
dtensor_result_2)
def testPackAndUnpackAssertion(self):
layout = Layout.replicated(self.mesh, rank=3)
# Due to Perf concerns, `pack` does not check the compatibility of
# components and layout. Here, we inject a wrong value components.
with api.default_mesh(self.mesh):
b = api.pack(
[constant_op.constant([[[(x + 1) * 1.0]]]) for x in range(8)],
layout=layout)
assert b.shape == [1, 1, 1]
# `to_numpy` assumes all unpacked tensors are compatible with the
# layout. So, it picks any component to use if that dimension is replicated.
# In this case, it picks the final one.
result_dtensor = numpy_util.to_numpy(b)
self.assertAllEqual(constant_op.constant([[[8.]]]), result_dtensor)
# assertDTensorEqual does more aggressive check, which respects the layout.
with self.assertRaisesRegex(AssertionError, 'Mismatched value'):
self.assertDTensorEqual(constant_op.constant([[[8.]]]), layout, b)
@parameterized.named_parameters(test_util_ops.UNARY_OPS)
def testUnaryOpsWithTwoShardedAndOneReplicatedDimension(self, op):
a = constant_op.constant([[[1.], [2.], [3.], [4.]], [[5.], [6.], [7.],
[8.]]])
assert a.shape == [2, 4, 1]
expected_result = op(a)
layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED], self.mesh)
a = api.relayout(a, layout)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(expected_result, layout, dtensor_result, tol=tol)
@parameterized.named_parameters(test_util_ops.UNARY_OPS)
def testUnaryOpsWithFullyReplicatedInputs(self, op):
a = constant_op.constant([[1., 2.], [3., 4.]])
assert a.shape == [2, 2]
expected_result = op(a)
a = api.copy_to_mesh(a, self.replicated_layout_2d)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(
expected_result, self.replicated_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(test_util_ops.UNARY_OPS)
def testUnaryOpsWithFullyShardedInputs(self, op):
a = constant_op.constant(
np.arange(16).reshape((2, 4, 2)), dtype=dtypes.float32)
expected_result = op(a)
sharded_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
self.mesh)
a = api.relayout(a, sharded_layout)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(
expected_result, sharded_layout, dtensor_result, tol=tol)
@parameterized.named_parameters(test_util_ops.UNARY_OPS)
def testUnaryOpsWithBatchShardedInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-3)
a = constant_op.constant(np.arange(6).reshape((2, 3)), dtype=dtypes.float32)
expected_result = op(a)
a = api.relayout(a, self.first_dimension_sharded_layout)
dtensor_result = op(a)
self.assertDTensorEqual(
expected_result,
self.first_dimension_sharded_layout,
dtensor_result,
tol=tol)
def testInvertOpsWithFullyShardedInputs(self):
# Invert only support int inputs.
op = lambda x: gen_bitwise_ops.invert(x=x, name='Invert')
a = constant_op.constant(
np.arange(16).reshape((2, 4, 2)), dtype=dtypes.int32)
expected_result = op(a)
sharded_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
self.mesh)
a = api.relayout(a, sharded_layout)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(
expected_result, sharded_layout, dtensor_result, tol=tol)
@parameterized.named_parameters(('replicated', layout_lib.UNSHARDED),
('sharded', _MESH_DIM_X))
def testInvertPermutationOp(self, shard):
self.skipForDeviceType(['GPU', 'TPU'],
'Invert Permutation runs in CPU only.')
op_input = constant_op.constant([3, 4, 0, 2, 1, 5])
expected_result = gen_array_ops.invert_permutation(op_input)
# We should always expected the output to be replicated as the
# expander should relayout both inputs and outputs to replicated.
expected_layout = Layout.replicated(self.mesh, rank=1)
self.assertDTensorEqual(
expected_result,
expected_layout,
gen_array_ops.invert_permutation(
api.relayout(op_input, Layout([shard], self.mesh))
),
)
def testErfcInvOpsWithFullyShardedInputs(self):
# By official doc, math_ops.erfcinv is defined on (0, 2]. In addition,
# math_ops.erfcinv internally calls ndtri internally. So to test the op for
# spmd expanding, we call raw op here.
op = lambda x: gen_math_ops.erfinv(x=x, name='erfinv')
a = constant_op.constant(
np.arange(16).reshape((2, 4, 2)) / 30 + 0.1, dtype=dtypes.float32)
expected_result = op(a)
sharded_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
self.mesh)
a = api.relayout(a, sharded_layout)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(
expected_result, sharded_layout, dtensor_result, tol=tol)
def testPopulationCountWithFullyShardedInputs(self):
# By official doc, gen_bitwise_ops.population_count only supports int
# inputs.
op = lambda x: gen_bitwise_ops.population_count(x=x, name='pc')
a = constant_op.constant(
np.arange(16).reshape((2, 4, 2)), dtype=dtypes.int32)
expected_result = op(a)
sharded_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
self.mesh)
a = api.relayout(a, sharded_layout)
dtensor_result = op(a)
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, 1e-4)
self.assertDTensorEqual(
expected_result, sharded_layout, dtensor_result, tol=tol)
def testIgammacOpsWithFullyShardedInputs(self):
# Igammac has super low precision on TPU. So we test it as a separated unit
# tests to avoid lower the tol of other tests.
#
# In addition, according to wiki link below, for s=4, all values are not
# inf/nan.
#
# https://en.wikipedia.org/wiki/Incomplete_gamma_function
tol = 1e-2
op = lambda x: gen_math_ops.igammac(4, x)
a = constant_op.constant(
np.arange(16).reshape((2, 4, 2)), dtype=dtypes.float32)
expected_result = op(a)
sharded_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED],
self.mesh)
a = api.relayout(a, sharded_layout)
dtensor_result = op(a)
self.assertDTensorEqual(
expected_result, sharded_layout, dtensor_result, tol=tol)
@parameterized.parameters(('replicated',), ('sharded',))
def testBiasAdd2D(self, shard_type):
value = np.array([[1., 2.], [3., 4.]])
bias = np.array([0.1, 0.2])
expected_result = nn_ops.bias_add(value, bias)
if shard_type == 'replicated':
layout = self.replicated_layout_2d
else:
layout = self.first_dimension_sharded_layout
value = api.relayout(value, layout)
bias = api.relayout(bias, self.replicated_layout_1d)
dtensor_result = nn_ops.bias_add(value, bias)
self.assertDTensorEqual(expected_result, layout, dtensor_result)
@parameterized.product(
shard_type=['replicated', 'batch_sharded'],
data_format=['N...C', 'NC...'])
def testBiasAdd4D(self, shard_type, data_format):
value = np.ones(shape=(6, 2, 4, 2), dtype=np.float32)
bias = np.array([0.1, 0.2], dtype=np.float32)
expected_result = nn_ops.bias_add(value, bias, data_format=data_format)
if shard_type == 'replicated':
layout = Layout.replicated(self.mesh, rank=4)
else:
layout = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=4)
value = api.relayout(value, layout)
bias = api.relayout(bias, self.replicated_layout_1d)
dtensor_result = nn_ops.bias_add(value, bias, data_format=data_format)
self.assertDTensorEqual(expected_result, layout, dtensor_result)
@parameterized.product(
data_format=['N...C', 'NC...'],
bias_sharding=['x', 'y', layout_lib.UNSHARDED],
c_dim_sharding=['x', layout_lib.UNSHARDED])
def testBiasAddDataFormatTest(self, data_format, bias_sharding,
c_dim_sharding):
if data_format == 'N...C':
c_dim = 3
input_sharding = [
layout_lib.UNSHARDED, layout_lib.UNSHARDED, 'y', c_dim_sharding
]
a = np.ones(shape=(1, 1, 4, 4), dtype=np.float32)
layout = Layout(input_sharding, self.mesh)
else:
c_dim = 1
input_sharding = [
layout_lib.UNSHARDED, c_dim_sharding, 'y', layout_lib.UNSHARDED
]
a = np.ones(shape=(1, 4, 4, 1), dtype=np.float32)
layout = Layout(input_sharding, self.mesh)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
expected_result = nn_ops.bias_add(a, bias, data_format=data_format)
expected_result_sharding = input_sharding
if c_dim_sharding == layout_lib.UNSHARDED and bias_sharding != 'y':
expected_result_sharding[c_dim] = bias_sharding
expected_layout = Layout(expected_result_sharding, self.mesh)
a = api.relayout(a, layout)
bias = api.relayout(bias, Layout([bias_sharding], self.mesh))
result = nn_ops.bias_add(a, bias=bias, data_format=data_format)
self.assertDTensorEqual(expected_result, expected_layout, result)
@parameterized.parameters(('replicated',), ('batch_sharded',))
def testBiasAddGrad2D(self, shard_type):
value = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
expected_result = gen_nn_ops.bias_add_grad(out_backprop=value)
if shard_type == 'replicated':
layout = self.replicated_layout_2d
else:
layout = self.first_dimension_sharded_layout
expected_layout = self.replicated_layout_1d
value = api.relayout(value, layout)
dtensor_result = gen_nn_ops.bias_add_grad(out_backprop=value)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.product(
shard_type=['replicated', 'batch_sharded'], data_format=['NHWC', 'NCHW'])
def testBiasAddGrad4D(self, shard_type, data_format):
value = np.ones(shape=(2, 3, 4, 5), dtype=np.float32)
expected_result = gen_nn_ops.bias_add_grad(
out_backprop=value, data_format=data_format)
if shard_type == 'replicated':
layout = Layout.replicated(self.mesh, rank=4)
else:
layout = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=4)
expected_layout = self.replicated_layout_1d
value = api.relayout(value, layout)
dtensor_result = gen_nn_ops.bias_add_grad(
out_backprop=value, data_format=data_format)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.named_parameters(test_util_ops.BINARY_FLOAT_OPS)
def testBinaryOpsWithFullyReplicatedInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
a = constant_op.constant([[1., 2.], [3., 4.]])
b = constant_op.constant([[10., 20.], [30., 40.]])
expected_result = op(a, b)
a = api.copy_to_mesh(a, self.replicated_layout_2d)
b = api.copy_to_mesh(b, self.replicated_layout_2d)
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result, self.replicated_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(test_util_ops.BINARY_FLOAT_OPS)
def testBinaryFloatOpsWithFullyShardedInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
a = constant_op.constant(np.arange(8).reshape((2, 4)), dtype=dtypes.float32)
b = constant_op.constant(
np.arange(8).reshape((2, 4)) + 10.0, dtype=dtypes.float32)
expected_result = op(a, b)
sharded_layout_2d = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
a = api.relayout(a, sharded_layout_2d)
b = api.relayout(b, sharded_layout_2d)
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result, sharded_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(test_util_ops.BINARY_BOOL_OPS)
def testBinaryBoolOpsWithFullyShardedInputs(self, op):
a = array_ops.reshape(
constant_op.constant(
[True, False, True, False, True, False, True, False]), [2, 4])
b = array_ops.reshape(
constant_op.constant(
[True, True, True, True, False, False, False, False]), [2, 4])
expected_result = op(a, b)
sharded_layout_2d = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
a = api.relayout(a, sharded_layout_2d)
b = api.relayout(b, sharded_layout_2d)
dtensor_result = op(a, b)
self.assertDTensorEqual(expected_result, sharded_layout_2d, dtensor_result)
@parameterized.named_parameters(test_util_ops.BINARY_INT_OPS)
def testBinaryIntOpsWithFullyShardedInputs(self, op):
dtype = dtypes.int64
if test_util.is_gpu_present() and op is gen_math_ops.truncate_mod:
dtype = dtypes.int32
a = constant_op.constant(np.arange(8).reshape((2, 4)), dtype=dtype)
b = constant_op.constant(np.arange(8).reshape((2, 4)) + 1, dtype=dtype)
expected_result = op(a, b)
sharded_layout_2d = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
a = api.relayout(a, sharded_layout_2d)
b = api.relayout(b, sharded_layout_2d)
dtensor_result = op(a, b)
self.assertDTensorEqual(expected_result, sharded_layout_2d, dtensor_result)
@parameterized.named_parameters(test_util_ops.BINARY_FLOAT_OPS)
def testBinaryFloatOpsWithBatchShardedInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
a = constant_op.constant(
np.array([[1., 2.], [3., 4.]]), dtype=dtypes.float32)
b = constant_op.constant(
np.array([[10., 20.], [30., 40.]]), dtype=dtypes.float32)
expected_result = op(a, b)
a = api.relayout(a, self.first_dimension_sharded_layout)
b = api.relayout(b, self.first_dimension_sharded_layout)
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result,
self.first_dimension_sharded_layout,
dtensor_result,
tol=tol)
@parameterized.named_parameters(test_util_ops.BINARY_INT_OPS)
def testBinaryIntOpsWithBatchShardedInputs(self, op):
dtype = dtypes.int64
if test_util.is_gpu_present() and op is gen_math_ops.truncate_mod:
dtype = dtypes.int32
a = constant_op.constant(np.array([[1, 2], [3, 4]]), dtype=dtype)
b = constant_op.constant(np.array([[5, 6], [7, 4]]), dtype=dtype)
expected_result = op(a, b)
a = api.relayout(a, self.first_dimension_sharded_layout)
b = api.relayout(b, self.first_dimension_sharded_layout)
dtensor_result = op(a, b)
self.assertDTensorEqual(expected_result,
self.first_dimension_sharded_layout, dtensor_result)
@parameterized.named_parameters(
test_util_ops.BINARY_FLOAT_OPS_WITH_BROADCASTING_SUPPORT
)
def testBinaryFloatOpsWithFullyReplicatedBroadcastableInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
# Currently we only support scalar.
a = constant_op.constant(23.4)
b = constant_op.constant([[10., 20.], [30., 40.]])
expected_result = op(a, b)
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=a.ndim))
b = api.copy_to_mesh(b, Layout.replicated(self.mesh, rank=b.ndim))
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result, self.replicated_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(
test_util_ops.BINARY_INT_OPS_WITH_BROADCASTING_SUPPORT
)
def testBinaryIntOpsWithFullyReplicatedBroadcastableInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
# Currently we only support scalar.
a = constant_op.constant(3)
b = constant_op.constant([[0, 1], [2, 3]])
a, b = order_broadcastable_operands(op, a, b)
expected_result = op(a, b)
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=a.ndim))
b = api.copy_to_mesh(b, Layout.replicated(self.mesh, rank=b.ndim))
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result, self.replicated_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(
test_util_ops.BINARY_FLOAT_OPS_WITH_BROADCASTING_SUPPORT
)
def testBinaryOpsWithFullyShardedBroadcastableInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
# Currently we only support scalar.
a = constant_op.constant(23.4)
b = constant_op.constant(
10.0 * np.arange(8).reshape((2, 4)), dtype=dtypes.float32)
expected_result = op(a, b)
a = api.copy_to_mesh(a, self.scalar_replicated_layout)
sharded_layout_2d = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
b = api.relayout(b, sharded_layout_2d)
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result, sharded_layout_2d, dtensor_result, tol=tol)
@parameterized.named_parameters(
test_util_ops.BINARY_FLOAT_OPS_WITH_BROADCASTING_SUPPORT
)
def testBinaryOpsWithBatchShardedBroadcastableInputs(self, op):
tol = select_tol(op, self.mesh, test_util.DEFAULT_TOL, low_res_tol=1e-2)
# Currently we only support scalar.
a = constant_op.constant(23.4)
b = constant_op.constant(
np.array([[10., 20.], [30., 40.]]), dtype=dtypes.float32)
expected_result = op(a, b)
a = api.copy_to_mesh(a, self.scalar_replicated_layout)
b = api.relayout(b, self.first_dimension_sharded_layout)
dtensor_result = op(a, b)
self.assertDTensorEqual(
expected_result,
self.first_dimension_sharded_layout,
dtensor_result,
tol=tol)
  @parameterized.named_parameters(
      test_util_ops.expand_test_config(
          [
              {
                  'testcase_name': 'Concat',
                  'op': (lambda v: array_ops.concat(values=v, axis=1)),
              },
              {
                  'testcase_name':
                      'ConcatV1',
                  'op':
                      (lambda v: gen_array_ops.concat(concat_dim=1, values=v)),
              },
              {
                  'testcase_name': 'ConcatV2',
                  'op': (lambda v: gen_array_ops.concat_v2(values=v, axis=1)),
              },
          ],
          [
              {
                  'shard_type': 'replicated',
              },
              {
                  'shard_type': 'sharded',
              },
              {
                  'shard_type': 'mixed',
              },
          ],
      ))
  def testConcatOpSPMD(self, op, shard_type):
    """Checks concat variants under replicated, sharded, and mixed layouts.

    Args:
      op: a unary callable wrapping one of the concat op variants; it takes
        a list of tensors and concatenates them along axis 1.
      shard_type: 'replicated', 'sharded', or 'mixed' — selects the input
        layouts; 'mixed' shards only the second operand.
    """
    # Default: everything replicated.
    layout_a = self.replicated_layout_2d
    layout_b = self.replicated_layout_2d
    layout_output = self.replicated_layout_2d
    if shard_type == 'sharded':
      layout_a = self.first_dimension_sharded_layout
      layout_b = self.first_dimension_sharded_layout
      layout_output = self.first_dimension_sharded_layout
    elif shard_type == 'mixed':
      # Only b is sharded; the output is still expected to be batch-sharded.
      layout_b = self.first_dimension_sharded_layout
      layout_output = self.first_dimension_sharded_layout
    a = constant_op.constant([[1., 2.], [3., 4.]])
    b = constant_op.constant([[1., 2.], [3., 4.]])
    expected_result = op([a, b])
    with api.default_mesh(self.mesh):
      a = api.relayout(a, layout_a)
      b = api.relayout(b, layout_b)
      c = op([a, b])
    self.assertDTensorEqual(expected_result, layout_output, c)
@parameterized.named_parameters([{
'testcase_name': 'ConcatV1',
'op': (lambda v: gen_array_ops.concat(concat_dim=1, values=v))
}, {
'testcase_name': 'ConcatV2',
'op': (lambda v: gen_array_ops.concat_v2(values=v, axis=1))
}])
def testConcatOpShardedOnConcatDim(self, op):
a = constant_op.constant(
np.arange(16).reshape((2, 2, 4)), dtype=dtypes.float32)
b = constant_op.constant(
np.arange(16).reshape((2, 2, 4)), dtype=dtypes.float32)
expected_result = op([a, b])
a_layout = Layout([layout_lib.UNSHARDED, _MESH_DIM_X, _MESH_DIM_Y],
self.mesh)
b_layout = Layout([_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED],
self.mesh)
# If any input is sharded on the concat dim, then the concat dim is
# replicated in the output. Dim 0 in the output is replicated because of
# broadcast compatibility, mesh dimension X is already used in dim 1 of
# input a.
output_layout = Layout(
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_Y], self.mesh)
a = api.relayout(a, a_layout)
b = api.relayout(b, b_layout)
@polymorphic_function.function
def concat_fn(a, b):
return op([a, b])
dtensor_result = concat_fn(a, b)
self.assertDTensorEqual(expected_result, output_layout, dtensor_result)
def testPackWithDifferentInputLayouts(self):
a = constant_op.constant([[1., 2.], [3., 4.]])
b = constant_op.constant([[1., 2.], [3., 4.]])
expected_result = gen_array_ops.pack(values=[a, b], axis=-1)
a = api.relayout(a, self.replicated_layout_2d)
b = api.relayout(b, self.first_dimension_sharded_layout)
@polymorphic_function.function
def pack_fn(a, b):
c = gen_array_ops.pack(values=[a, b], axis=-1)
return api.relayout(c, self.first_dimension_sharded_layout_3d)
dtensor_result = pack_fn(a, b)
self.assertDTensorEqual(expected_result,
self.first_dimension_sharded_layout_3d,
dtensor_result)
@parameterized.named_parameters(test_util_ops.REDUCTION_OPS)
def testReductionOpsWithFullyReplicatedInputs(self, op):
for axis, expected_layout in [([0], self.replicated_layout_1d),
([1], self.replicated_layout_1d),
([0, 1], self.scalar_replicated_layout),
(None, self.scalar_replicated_layout)]:
# Disable the pylint as the cell var is used for this iteration only.
# pylint: disable=cell-var-from-loop
reduction_op = lambda x: op(x, axis=axis)
# pylint: enable=cell-var-from-loop
a = constant_op.constant([[1., 2.], [3., 4.]])
expected_result = reduction_op(a)
a = api.copy_to_mesh(a, self.replicated_layout_2d)
with api.default_mesh(self.mesh):
dtensor_result = reduction_op(a)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.named_parameters(test_util_ops.REDUCTION_OPS)
def testReductionOpsWithBatchParallelInputs(self, op):
sharded_layout_1d = Layout([_MESH_DIM_X], self.mesh)
for axis, expected_layout in [
(
[0],
self.replicated_layout_1d,
),
([1], sharded_layout_1d),
(
[0, 1],
self.scalar_replicated_layout,
),
(
None,
self.scalar_replicated_layout,
),
]:
# Disable the pylint as the cell var is used for this iteration only.
# pylint: disable=cell-var-from-loop
reduction_op = lambda x: op(x, axis=axis)
# pylint: enable=cell-var-from-loop
a = constant_op.constant(
np.array([[1., 2.], [3., 4.], [5.0, 6.0], [7.0, 8.0]]),
dtype=dtypes.float32)
expected_result = reduction_op(a)
a = api.relayout(a, self.first_dimension_sharded_layout)
with api.default_mesh(self.mesh):
dtensor_result = reduction_op(a)
self.assertDTensorEqual(expected_result, expected_layout,
dtensor_result)
def testReduceLogSumExpWithBatchParallelInputs(self):
a = constant_op.constant(
np.array([[1., 2.], [3., 4.], [5.0, 6.0], [7.0, 8.0]]),
dtype=dtypes.float32)
expected_result = math_ops.reduce_logsumexp(a, axis=-1)
a = api.relayout(a, self.first_dimension_sharded_layout)
with api.default_mesh(self.mesh):
dtensor_result = math_ops.reduce_logsumexp(a, axis=-1)
self.assertDTensorEqual(expected_result,
self.first_dimension_sharded_layout_1d,
dtensor_result)
  @parameterized.named_parameters(test_util_ops.REDUCTION_OPS)
  def testReductionOpsWithBatchParallelInputsWithInt64Dtype(self, op):
    """Checks int64 reductions over a batch-sharded input for each axis.

    Skipped on TPU (int64 reduce unsupported there) and, in multi-device
    mode, on GPU for reduce_min/reduce_mean (float-only kernels).
    """
    self.skipForDeviceType(['TPU'], 'reduce on TPU only supports int32')
    # TODO(b/303662238): Replicate soft placement (or implement these kernels).
    if test_util.use_multi_device_mode() and (
        op is math_ops.reduce_min or op is math_ops.reduce_mean
    ):
      self.skipForDeviceType(['GPU'], 'reduce on GPU only supports floats')
    sharded_layout_1d = Layout([_MESH_DIM_X], self.mesh)
    # (reduction axis, expected output layout) pairs: reducing the sharded
    # batch dim replicates the result; reducing only dim 1 keeps sharding.
    for axis, expected_layout in [
        (
            [0],
            self.replicated_layout_1d,
        ),
        ([1], sharded_layout_1d),
        (
            [0, 1],
            self.scalar_replicated_layout,
        ),
        (
            None,
            self.scalar_replicated_layout,
        ),
    ]:
      # Disable the pylint as the cell var is used for this iteration only.
      # pylint: disable=cell-var-from-loop
      reduction_op = lambda x: op(x, axis=axis)
      # pylint: enable=cell-var-from-loop
      a = constant_op.constant(
          np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dtype=dtypes.int64)
      expected_result = reduction_op(a)
      # pylint: disable=g-long-lambda
      a = api.relayout(a, self.first_dimension_sharded_layout)
      with api.default_mesh(self.mesh):
        dtensor_result = reduction_op(a)
      self.assertDTensorEqual(expected_result, expected_layout,
                              dtensor_result)
  @parameterized.named_parameters(test_util_ops.REDUCTION_OPS)
  def testReductionOpsWithBatchParallelInputsWithInt32(self, op):
    """Checks int32 reductions over a batch-sharded input for each axis."""
    self.skipForDeviceType(['GPU'], 'reduce on GPU only supports int64')
    sharded_layout_1d = Layout([_MESH_DIM_X], self.mesh)
    # (reduction axis, expected output layout) pairs: reducing the sharded
    # batch dim replicates the result; reducing only dim 1 keeps sharding.
    for axis, expected_layout in [
        (
            [0],
            self.replicated_layout_1d,
        ),
        (
            [1],
            sharded_layout_1d,
        ),
        (
            [0, 1],
            self.scalar_replicated_layout,
        ),
        (
            None,
            self.scalar_replicated_layout,
        ),
    ]:
      # Disable the pylint as the cell var is used for this iteration only.
      # pylint: disable=cell-var-from-loop
      reduction_op = lambda x: op(x, axis=axis)
      # pylint: enable=cell-var-from-loop
      a = constant_op.constant(
          np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dtype=dtypes.int32)
      expected_result = reduction_op(a)
      # pylint: disable=g-long-lambda
      a = api.relayout(a, self.first_dimension_sharded_layout)
      with api.default_mesh(self.mesh):
        dtensor_result = reduction_op(a)
      self.assertDTensorEqual(expected_result, expected_layout,
                              dtensor_result)
@parameterized.named_parameters(
test_util_ops.expand_test_config(
test_util_ops.REDUCTION_OPS,
[
{
'dtype': dtypes.float32
},
{
'dtype': dtypes.int32
},
],
))
def testReductionOpsWithReplicatedWithDtypes(self, op, dtype):
self.skipForDeviceType(['GPU'], 'b/169353279: int32 caused segfault on GPU')
axis = [0]
# Disable the pylint as the cell var is used for this iteration only.
# pylint: disable=cell-var-from-loop
reduction_op = lambda x: op(x, axis=axis)
# pylint: enable=cell-var-from-loop
a = constant_op.constant(
np.array([[1., 2.], [3., 4.], [5.0, 6.0], [7.0, 8.0]]), dtype=dtype)
expected_result = reduction_op(a)
a = api.relayout(a, self.replicated_layout_2d)
with api.default_mesh(self.mesh):
dtensor_result = reduction_op(a)
self.assertDTensorEqual(expected_result, self.replicated_layout_1d,
dtensor_result)
@parameterized.named_parameters(
test_util_ops.expand_test_config(
test_util_ops.REDUCTION_OPS,
[
{
'dtype': dtypes.float32
},
{
'dtype': dtypes.int32
},
],
))
def testReductionOpsWithBatchShardingWithDTypes(self, op, dtype):
self.skipForDeviceType(['GPU'], 'b/169353279: int32 caused segfault on GPU')
axis = [1]
# Disable the pylint as the cell var is used for this iteration only.
# pylint: disable=cell-var-from-loop
reduction_op = lambda x: op(x, axis=axis)
# pylint: enable=cell-var-from-loop
a = constant_op.constant(
np.array([[1., 2.], [3., 4.], [5.0, 6.0], [7.0, 8.0]]), dtype=dtype)
expected_result = reduction_op(a)
a = api.relayout(a, self.first_dimension_sharded_layout)
with api.default_mesh(self.mesh):
dtensor_result = reduction_op(a)
self.assertDTensorEqual(expected_result,
self.first_dimension_sharded_layout_1d,
dtensor_result)
  @parameterized.named_parameters(
      test_util_ops.expand_test_config(
          test_util_ops.REDUCTION_OPS,
          [
              {
                  'axis': [0, 1],
                  'dtype': dtypes.float32
              },
              {
                  'axis': [0, 1],
                  'dtype': dtypes.int32
              },
              {
                  'axis': None,
                  'dtype': dtypes.float32
              },
              {
                  'axis': None,
                  'dtype': dtypes.int32
              },
          ],
      ))
  def testReductionOpsWithReplicatedLayoutAndDTypes(self, op, axis, dtype):
    """Checks full reductions produce a replicated scalar.

    NOTE(review): despite the test name mentioning "ReplicatedLayout", the
    input is relayed out to the batch-sharded layout below; the "replicated"
    part refers to the expected scalar output layout — confirm intent.
    """
    self.skipForDeviceType(['GPU'], 'b/169353279: int32 caused segfault on GPU')
    # Disable the pylint as the cell var is used for this iteration only.
    # pylint: disable=cell-var-from-loop
    reduction_op = lambda x: op(x, axis=axis)
    # pylint: enable=cell-var-from-loop
    a = constant_op.constant(
        np.array([[1., 2.], [3., 4.], [5.0, 6.0], [7.0, 8.0]]), dtype=dtype)
    expected_result = reduction_op(a)
    a = api.relayout(a, self.first_dimension_sharded_layout)
    with api.default_mesh(self.mesh):
      dtensor_result = reduction_op(a)
    self.assertDTensorEqual(expected_result, self.scalar_replicated_layout,
                            dtensor_result)
  @parameterized.named_parameters(
      test_util_ops.expand_test_config(
          [
              {
                  'testcase_name': 'FullyReplicatedInputs',
                  'shard_type': 'replicated',
              },
              {
                  'testcase_name': 'BatchShardedInputs',
                  'shard_type': 'batch_sharded',
              },
          ],
          [
              {
                  'axis': -1,
              },
              {
                  'axis': 0,
              },
              {
                  'axis': 1,
              },
          ],
      )
  )
  def testOneHotSPMDWith(self, shard_type, axis):
    """Checks one_hot layout propagation for replicated/batch-sharded indices.

    Args:
      shard_type: 'replicated' or 'batch_sharded' — layout of the indices.
      axis: the one_hot axis; only -1 currently runs (others are skipped).
    """
    if axis != -1:
      self.skipTest('b/177569789: fix this test with layout propagation v2')
    indices = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.int32)
    depth = constant_op.constant(10, dtype=dtypes.int32)
    indices_layout = (
        self.replicated_layout_2d
        if shard_type == 'replicated' else self.first_dimension_sharded_layout)
    output_layout = (
        Layout.replicated(self.mesh, rank=3) if shard_type == 'replicated' else
        Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=3))
    expected_result = array_ops.one_hot(indices, depth, axis=axis)
    indices = api.relayout(indices, indices_layout)
    depth = api.copy_to_mesh(depth, self.scalar_replicated_layout)
    dtensor_result = array_ops.one_hot(indices, depth, axis=axis)
    # Inserting the one-hot dim at axis 0 shifts the sharded batch dim to
    # the middle of the rank-3 output.
    if axis == 0 and shard_type == 'batch_sharded':
      output_layout = self.middle_dimension_sharded_layout_3d
    self.assertDTensorEqual(expected_result, output_layout, dtensor_result)
def testOneHotSPMDWithDifferentLayout(self):
indices = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.int32)
depth = constant_op.constant(10, dtype=dtypes.int32)
expected_result = array_ops.one_hot(indices, depth, axis=2)
indices = api.relayout(indices, self.replicated_layout_2d)
depth = api.copy_to_mesh(depth, self.scalar_replicated_layout)
@polymorphic_function.function
def one_hot_fn(indices, depth):
result = array_ops.one_hot(indices, depth, axis=2)
return api.relayout(result, self.first_dimension_sharded_layout_3d)
dtensor_result = one_hot_fn(indices, depth)
self.assertDTensorEqual(expected_result,
self.first_dimension_sharded_layout_3d,
dtensor_result)
def testL2LossOpsWithFullyReplicatedInputs(self):
loss_op = gen_nn_ops.l2_loss
a = constant_op.constant([[1., 2.], [3., 4.]])
expected_result = loss_op(a)
expected_layout = self.scalar_replicated_layout
a = api.copy_to_mesh(a, self.replicated_layout_2d)
dtensor_result = loss_op(a)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
def testL2LossOpsWithFullyShardedInputs(self):
loss_op = gen_nn_ops.l2_loss
a = constant_op.constant([[1., 2.], [3., 4.]])
expected_result = loss_op(a)
expected_layout = self.scalar_replicated_layout
a = api.relayout(a, self.first_dimension_sharded_layout)
dtensor_result = loss_op(a)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.named_parameters(test_util_ops.EXPANSION_OPS)
def testExpansionOpsReplicatedLayout(self, inputs, op):
self.skipTest('b/177569789: fix this test with layout propagation v2')
global_op_args = inputs()
expected_result = op(*global_op_args)
with api.default_mesh(self.mesh):
dtensor_op_args = inputs()
def _broadcast_to_replicated(x):
x = constant_op.constant(x)
return api.copy_to_mesh(
x, Layout.replicated(self.mesh, rank=x.shape.ndims))
dtensor_op_args = nest.map_structure(_broadcast_to_replicated,
dtensor_op_args)
with api._dtensor_device()._default_layout(self.replicated_layout_2d):
dtensor_result = op(*dtensor_op_args)
self.assertDTensorEqual(expected_result, self.replicated_layout_2d,
dtensor_result)
@parameterized.named_parameters(test_util_ops.EXPANSION_OPS)
def testExpansionOpsFullySharded(self, inputs, op):
self.skipTest('b/177569789: fix this test with layout propagation v2')
global_op_args = inputs()
expected_result = op(*global_op_args)
with api.default_mesh(self.mesh):
dtensor_op_args = inputs()
def _broadcast_to_replicated(x):
x = constant_op.constant(x)
return api.copy_to_mesh(
x, Layout.replicated(self.mesh, rank=x.shape.ndims))
dtensor_op_args = nest.map_structure(_broadcast_to_replicated,
dtensor_op_args)
sharded_layout_2d = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
with api._dtensor_device()._default_layout(sharded_layout_2d):
dtensor_result = op(*dtensor_op_args)
self.assertDTensorEqual(expected_result, sharded_layout_2d, dtensor_result)
  @parameterized.named_parameters(test_util_ops.EXPANSION_OPS)
  def testExpansionOpsBatchSharded(self, inputs, op):
    """Checks expansion ops under a batch-sharded default output layout.

    NOTE(review): this test calls `api._dtensor_device().default_layout(...)`
    while the sibling Replicated/FullySharded tests call `_default_layout`
    (leading underscore). The discrepancy is masked by the unconditional
    skipTest below — confirm which spelling is correct before un-skipping.
    """
    self.skipTest('b/177569789: fix this test with layout propagation v2')
    global_op_args = inputs()
    expected_result = op(*global_op_args)
    first_d_shard_layout = Layout([_MESH_DIM_X, layout_lib.UNSHARDED],
                                  self.mesh)
    with api.default_mesh(self.mesh):
      dtensor_op_args = inputs()
      def _broadcast_to_replicated(x):
        # Wrap each argument and broadcast it replicated onto the mesh.
        x = constant_op.constant(x)
        return api.copy_to_mesh(
            x, Layout.replicated(self.mesh, rank=x.shape.ndims))
      dtensor_op_args = nest.map_structure(_broadcast_to_replicated,
                                           dtensor_op_args)
      with api._dtensor_device().default_layout(first_d_shard_layout):
        dtensor_result = op(*dtensor_op_args)
    self.assertDTensorEqual(expected_result, first_d_shard_layout,
                            dtensor_result)
def testSliceOpsWithFullyReplicatedInputs(self):
t = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
expected_result = array_ops.slice(t, [0, 0], [-1, 2])
a = api.copy_to_mesh(t, self.replicated_layout_2d)
with api.default_mesh(self.mesh):
dtensor_result = array_ops.slice(a, [0, 0], [-1, 2])
self.assertDTensorEqual(expected_result, self.replicated_layout_2d,
dtensor_result)
@parameterized.named_parameters(('_minus_one_size', -1), ('_pos_size', 2))
def testSliceOpsWithFullSlicingOnShardedInputs(self, size):
t = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
expected_result = array_ops.slice(t, [0, 0], [size, 2])
sharded_layout = self.first_dimension_sharded_layout
t = api.relayout(t, sharded_layout)
with api.default_mesh(self.mesh):
dtensor_result = array_ops.slice(t, [0, 0], [size, 2])
self.assertDTensorEqual(expected_result, sharded_layout, dtensor_result)
def testSliceOpsWithDynamicBeginFullSlicingOnShardedInputs(self):
tensor = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
begins = constant_op.constant([0, 0], dtype=dtypes.int32)
@polymorphic_function.function
def slice_fn(tensor, begins):
return array_ops.slice(tensor, begins, [2, 2])
expected_result = slice_fn(tensor, begins)
sharded_layout = self.first_dimension_sharded_layout
tensor = api.relayout(tensor, sharded_layout)
begins = api.relayout(begins, self.replicated_layout_1d)
dtensor_result = slice_fn(tensor, begins)
self.assertDTensorEqual(expected_result, sharded_layout, dtensor_result)
  @parameterized.named_parameters(
      # Each case is (name, strided_slice kwargs, input sharding,
      # optional expected output sharding — defaults to the input sharding).
      (
          'FullyReplicatedInputs',
          {'begin': [0, 0], 'end': [-1, 2], 'strides': [1, 2]},
          [layout_lib.UNSHARDED] * 2,
      ),
      (
          'NewAxisMask',
          {
              'begin': [0, 0, 0, 0],
              'end': [0, 0, 2, 4],
              'strides': [1, 1, 1, 1],
              'new_axis_mask': 3,
          },
          [layout_lib.UNSHARDED] * 2,
          [layout_lib.UNSHARDED] * 4,
      ),
      (
          'ShrinkAxisMask',
          {
              'begin': [0, 0],
              'end': [-1, 2],
              'strides': [1, 1],
              'shrink_axis_mask': 2,
          },
          [layout_lib.UNSHARDED] * 2,
          [layout_lib.UNSHARDED],
      ),
      (
          'EllipsisAxisMask',
          {
              'begin': [0, 0, 0],
              'end': [0, 0, 0],
              'strides': [1, 1, 1],
              'ellipsis_mask': 1,
              'new_axis_mask': 6,
          },
          [layout_lib.UNSHARDED] * 2,
          [layout_lib.UNSHARDED] * 4,
      ),
      (
          'MoreAxis',
          {
              'begin': [0],
              'end': [2],
              'strides': [1],
          },
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'ShardingOnNonSlicedDimension',
          {'begin': [0, 0], 'end': [2, 2], 'strides': [1, 2]},
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'StrideOnShardedDimensionNoRelayout1',
          {'begin': [0, 0], 'end': [2, 4], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNoRelayout2',
          {'begin': [0, 1], 'end': [2, 4], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNoRelayout3',
          {'begin': [0, 0], 'end': [2, 3], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNeedRelayout',
          {'begin': [0, 0], 'end': [-1, 4], 'strides': [1, 3]},
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [layout_lib.UNSHARDED] * 2,
      ),
      (
          'DynamicSliceWithBeginEndMask',
          {
              'begin': lambda: array_ops.fill([2], 0),
              'end': [-1, 4],
              'strides': [1, 3],
              'begin_mask': 3,
              'end_mask': 3,
          },
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'DynamicSliceNoMask',
          {
              'begin': lambda: array_ops.fill([2], 0),
              'end': [-1, 4],
              'strides': [1, 3],
              'begin_mask': 0,
              'end_mask': 0,
          },
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [layout_lib.UNSHARDED, layout_lib.UNSHARDED],
      ),
  )
  def testStridedSliceOps(self, args, input_layout, expected_layout=None):
    """Checks strided_slice layout propagation for many mask/stride configs.

    Args:
      args: kwargs for gen_array_ops.strided_slice; values that are callables
        are invoked inside the traced function to produce runtime tensors.
      input_layout: sharding specs for the input tensor's layout.
      expected_layout: sharding specs for the expected output layout;
        defaults to `input_layout` when None.
    """
    input_tensor = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
    @polymorphic_function.function
    def func(input_tensor):
      # Materialize callable args (e.g. dynamic `begin`) at trace time.
      newargs = {}
      for key, value in args.items():
        newargs[key] = value() if hasattr(value, '__call__') else value
      return gen_array_ops.strided_slice(input=input_tensor, **newargs)
    expected_result = func(input_tensor)
    input_layout = Layout(input_layout, self.mesh)
    if expected_layout is None:
      expected_layout = input_layout
    else:
      expected_layout = Layout(expected_layout, self.mesh)
    dtensor_input_tensor = api.relayout(input_tensor, input_layout)
    dtensor_result = func(dtensor_input_tensor)
    self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
  @parameterized.named_parameters(
      # Each case is (name, strided_slice kwargs, expected output sharding,
      # optional incoming-gradient sharding — defaults to the expected one).
      (
          'FullyReplicatedInputs',
          {'begin': [0, 0], 'end': [-1, 2], 'strides': [1, 2]},
          [layout_lib.UNSHARDED] * 2,
      ),
      (
          'NewAxisMask',
          {
              'begin': [0, 0, 0, 0],
              'end': [0, 0, 2, 4],
              'strides': [1, 1, 1, 1],
              'new_axis_mask': 3,
          },
          [layout_lib.UNSHARDED] * 2,
          [layout_lib.UNSHARDED] * 4,
      ),
      (
          'ShrinkAxisMask',
          {
              'begin': [0, 0],
              'end': [-1, 2],
              'strides': [1, 1],
              'shrink_axis_mask': 2,
          },
          [layout_lib.UNSHARDED, layout_lib.UNSHARDED],
          [layout_lib.UNSHARDED],
      ),
      (
          'EllipsisAxisMask',
          {
              'begin': [0, 0, 0],
              'end': [0, 0, 0],
              'strides': [1, 1, 1],
              'ellipsis_mask': 1,
              'new_axis_mask': 6,
          },
          [layout_lib.UNSHARDED] * 2,
          [layout_lib.UNSHARDED] * 4,
      ),
      (
          'MoreAxis',
          {
              'begin': [0],
              'end': [2],
              'strides': [1],
          },
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'ShardingOnNonSlicedDimension',
          {'begin': [0, 0], 'end': [2, 2], 'strides': [1, 2]},
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'StrideOnShardedDimensionNoRelayout1',
          {'begin': [0, 0], 'end': [2, 4], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNoRelayout2',
          {'begin': [0, 1], 'end': [2, 4], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNoRelayout3',
          {'begin': [0, 0], 'end': [2, 3], 'strides': [1, 2]},
          [layout_lib.UNSHARDED, _MESH_DIM_X],
      ),
      (
          'StrideOnShardedDimensionNeedRelayout',
          {'begin': [0, 0], 'end': [-1, 4], 'strides': [1, 3]},
          [layout_lib.UNSHARDED, layout_lib.UNSHARDED],
          [layout_lib.UNSHARDED] * 2,
      ),
      (
          'DynamicSliceWithBeginEndMask',
          {
              'begin': lambda: array_ops.fill([2], 0),
              'end': [-1, 4],
              'strides': [1, 3],
              'begin_mask': 3,
              'end_mask': 3,
          },
          [_MESH_DIM_X, layout_lib.UNSHARDED],
          [_MESH_DIM_X, layout_lib.UNSHARDED],
      ),
      (
          'DynamicSliceNoMask',
          {
              'begin': lambda: array_ops.fill([2], 0),
              'end': [-1, 4],
              'strides': [1, 3],
              'begin_mask': 0,
              'end_mask': 0,
          },
          [layout_lib.UNSHARDED, layout_lib.UNSHARDED],
          [layout_lib.UNSHARDED, layout_lib.UNSHARDED],
      ),
  )
  def testStridedSliceGradOps(self, args, expected_layout, value_layout=None):
    """Checks strided_slice_grad layout propagation for many configs.

    Args:
      args: kwargs for the strided_slice/strided_slice_grad pair; callables
        are invoked inside the traced function to produce runtime tensors.
      expected_layout: sharding specs for the expected gradient-output layout.
      value_layout: sharding specs for the incoming gradient `dy`;
        defaults to `expected_layout` when None.
    """
    input_tensor = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
    shape = input_tensor.shape.as_list()
    expected_layout = Layout(expected_layout, self.mesh)
    if value_layout is None:
      value_layout = expected_layout
    else:
      value_layout = Layout(value_layout, self.mesh)
    def get_newargs():
      # Materialize callable args (e.g. dynamic `begin`) on each use.
      newargs = {}
      for key, value in args.items():
        newargs[key] = value() if hasattr(value, '__call__') else value
      return newargs
    @polymorphic_function.function
    def func(grad):
      return gen_array_ops.strided_slice_grad(
          shape=shape, **get_newargs(), dy=grad
      )
    # Build the incoming gradient by slicing the input with the same args.
    grad = gen_array_ops.strided_slice(input=input_tensor, **get_newargs())
    expected_result = func(grad)
    dtensor_grad = api.relayout(grad, value_layout)
    dtensor_result = func(dtensor_grad)
    self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
@parameterized.named_parameters(
(
'FullyReplicatedInputs',
{'begin': [0, 0], 'end': [-1, 2], 'strides': [1, 2]},
[layout_lib.UNSHARDED] * 2,
[layout_lib.UNSHARDED] * 2,
),
(
'NewAxisMask',
{
'begin': [0, 0, 0, 0],
'end': [0, 0, 2, 4],
'strides': [1, 1, 1, 1],
'new_axis_mask': 3,
},
[layout_lib.UNSHARDED] * 2,
[layout_lib.UNSHARDED] * 4,
),
(
'ShrinkAxisMask',
{
'begin': [0, 0],
'end': [-1, 2],
'strides': [1, 1],
'shrink_axis_mask': 2,
},
[layout_lib.UNSHARDED] * 2,
[layout_lib.UNSHARDED],
),
(
'EllipsisAxisMask',
{
'begin': [0, 0, 0],
'end': [0, 0, 0],
'strides': [1, 1, 1],
'ellipsis_mask': 1,
'new_axis_mask': 6,
},
[layout_lib.UNSHARDED] * 2,
[layout_lib.UNSHARDED] * 4,
[layout_lib.UNSHARDED] * 2,
),
(
'MoreAxis',
{
'begin': [0],
'end': [2],
'strides': [1],
},
[_MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'ShardingOnNonSlicedDimension',
{'begin': [0, 0], 'end': [2, 2], 'strides': [1, 2]},
[_MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'StrideOnShardedDimensionNoRelayout1',
{'begin': [0, 0], 'end': [2, 4], 'strides': [1, 2]},
[layout_lib.UNSHARDED, _MESH_DIM_X],
[layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'StrideOnShardedDimensionNoRelayout2',
{'begin': [0, 1], 'end': [2, 4], 'strides': [1, 2]},
[layout_lib.UNSHARDED, _MESH_DIM_X],
[layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'StrideOnShardedDimensionNoRelayout3',
{'begin': [0, 0], 'end': [2, 3], 'strides': [1, 2]},
[layout_lib.UNSHARDED, _MESH_DIM_X],
[layout_lib.UNSHARDED, _MESH_DIM_X],
),
(
'StrideOnShardedDimensionNeedRelayout',
{'begin': [0, 0], 'end': [-1, 4], 'strides': [1, 3]},
[_MESH_DIM_X, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED] * 2,
[layout_lib.UNSHARDED] * 2,
),
(
'DynamicSliceWithBeginEndMask',
{
'begin': lambda: array_ops.fill([2], 0),
'end': [-1, 4],
'strides': [1, 3],
'begin_mask': 3,
'end_mask': 3,
},
[_MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED],
[_MESH_DIM_X, layout_lib.UNSHARDED],
),
(
'DynamicSliceNoMask',
{
'begin': lambda: array_ops.fill([2], 0),
'end': [-1, 4],
'strides': [1, 3],
'begin_mask': 0,
'end_mask': 0,
},
[layout_lib.UNSHARDED, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, layout_lib.UNSHARDED],
[layout_lib.UNSHARDED, layout_lib.UNSHARDED],
),
)
def testStridedSliceUpdateOps(self,
args,
input_layout,
value_layout,
expected_layout=None):
self.skipForDeviceType(['TPU'], 'b/123559667; op has no XLA implementation')
input_tensor = constant_op.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
def get_newargs():
newargs = {}
for key, value in args.items():
newargs[key] = value() if hasattr(value, '__call__') else value
return newargs
value_tensor = (
gen_array_ops.strided_slice(input=input_tensor, **get_newargs()) * 10.0
)
@polymorphic_function.function
def func(input_tensor, value_tensor):
return gen_array_ops.tensor_strided_slice_update(
input=input_tensor, value=value_tensor, **get_newargs()
)
expected_result = func(input_tensor, value_tensor)
input_layout = Layout(input_layout, self.mesh)
value_layout = Layout(value_layout, self.mesh)
if expected_layout is None:
expected_layout = input_layout
else:
expected_layout = Layout(expected_layout, self.mesh)
dtensor_input_tensor = api.relayout(input_tensor, input_layout)
dtensor_value_tensor = api.relayout(value_tensor, value_layout)
dtensor_result = func(dtensor_input_tensor, dtensor_value_tensor)
self.assertDTensorEqual(expected_result, expected_layout, dtensor_result)
def testBroadcastGradientArgs(self):
a = constant_op.constant([128, 10])
b = constant_op.constant([128, 10])
ea, eb = gen_array_ops.broadcast_gradient_args(s0=a, s1=b)
a = api.copy_to_mesh(a, self.replicated_layout_1d)
b = api.copy_to_mesh(b, self.replicated_layout_1d)
da, db = gen_array_ops.broadcast_gradient_args(s0=a, s1=b)
self.assertDTensorEqual(ea, self.replicated_layout_1d, da)
self.assertDTensorEqual(eb, self.replicated_layout_1d, db)
def _transpose_shape(self, transpose, shape):
if transpose:
shape[-1], shape[-2] = shape[-2:]
return shape
def _merge_layouts_for_matmul(self, layout_a, layout_b, transpose_a,
transpose_b):
# This merge does no error checking and assumes that mesh dimensions
# are compatible and that layout_a and b are on the same mesh.
# Prepend enough layout_lib.UNSHARDED to give both lists the same size.
a_sharding_spec = (
[layout_lib.UNSHARDED] * max(0, layout_b.rank - layout_a.rank) +
layout_a.sharding_specs)
b_sharding_spec = (
[layout_lib.UNSHARDED] * max(0, layout_a.rank - layout_b.rank) +
layout_b.sharding_specs)
if transpose_a:
a_sharding_spec[-1], a_sharding_spec[-2] = a_sharding_spec[-2:]
if transpose_b:
b_sharding_spec[-1], b_sharding_spec[-2] = b_sharding_spec[-2:]
def _get_mesh_dim(i):
if b_sharding_spec[i] == layout_lib.UNSHARDED:
return a_sharding_spec[i]
return b_sharding_spec[i]
final_layout = [_get_mesh_dim(i) for i in range(len(a_sharding_spec) - 2)]
final_layout.append(a_sharding_spec[-2])
final_layout.append(b_sharding_spec[-1])
if final_layout[-2] == final_layout[-1]:
final_layout[-2] = layout_lib.UNSHARDED
final_layout[-1] = layout_lib.UNSHARDED
for i in range(len(final_layout) - 2):
if (final_layout[i] == a_sharding_spec[-2] or
final_layout[i] == a_sharding_spec[-1] or
final_layout[i] == b_sharding_spec[-2] or
final_layout[i] == b_sharding_spec[-1]):
final_layout[i] = layout_lib.UNSHARDED
return Layout(final_layout, layout_a.mesh)
@parameterized.named_parameters(*test_util.product(_MATMUL_IMPLEMENTED,
_MATMUL_TRANSPOSE))
def testMatMul(self, a_layout, b_layout, transpose_a, transpose_b):
# Swap layout 1 and 2, so that test name is correct (contracting and
# non_contracting dims switch when transposed).
if transpose_a and a_layout > 0:
a_layout = 3 - a_layout
if transpose_b and b_layout > 0:
b_layout = 3 - b_layout
a_layout = self.layouts_2d[a_layout]
b_layout = self.layouts_2d[b_layout]
a_numpy = np.random.uniform(size=self._transpose_shape(transpose_a, [4, 8]))
b_numpy = np.random.uniform(
size=self._transpose_shape(transpose_b, [8, 12]))
a = constant_op.constant(a_numpy, dtype=dtypes.float32)
b = constant_op.constant(b_numpy, dtype=dtypes.float32)
expected = math_ops.matmul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b)
a = api.relayout(a, a_layout)
b = api.relayout(b, b_layout)
dtensor_result = math_ops.matmul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b)
expected_layout = self._merge_layouts_for_matmul(a_layout, b_layout,
transpose_a, transpose_b)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
@parameterized.named_parameters(*test_util.product(_BATCH_MATMUL_IMPLEMENTED,
_MATMUL_TRANSPOSE))
def testBatchMatMul(self, a_layout, b_layout, transpose_a, transpose_b):
# Swap layout 2 and 3, so that test name is correct (contracting and
# non_contracting dims switch when transposed).
if transpose_a and a_layout > 1:
a_layout = 5 - a_layout
if transpose_b and b_layout > 1:
b_layout = 5 - b_layout
a_layout = self.layouts_3d[a_layout]
b_layout = self.layouts_3d[b_layout]
a_numpy = np.random.uniform(
size=self._transpose_shape(transpose_a, [2, 4, 8]))
b_numpy = np.random.uniform(
size=self._transpose_shape(transpose_b, [2, 8, 12]))
a = constant_op.constant(a_numpy, dtype=dtypes.float32) # 2x4x8
b = constant_op.constant(b_numpy, dtype=dtypes.float32) # 2x8x12
# math_ops.matmul should emit a BatchMatMulV2 op here.
expected = math_ops.matmul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b)
a = api.relayout(a, a_layout)
b = api.relayout(b, b_layout)
dtensor_result = math_ops.matmul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b)
expected_layout = self._merge_layouts_for_matmul(a_layout, b_layout,
transpose_a, transpose_b)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
@parameterized.named_parameters(
('_a_unsharded_b_unsharded', 0, 0), ('_a_batch_b_unsharded', 1, 0),
('_a_non_contracting_b_unsharded', 2, 0),
('_a_contracting_b_unsharded', 3, 0),
('_a_unsharded_b_non_contracting', 0, 2),
('_a_unsharded_b_contracting', 0, 1),
('_a_contracting_b_contracting', 3, 1),
('_a_contracting_b_non_contracting', 3, 2),
('_a_non_contracting_b_non_contracting', 2, 2),
('_a_non_contracting_b_contracting', 2, 1),
('_a_batch_b_non_contracting', 1, 2), ('_a_batch_b_contracting', 1, 1))
def testBatchMatMulWithBroadcasting(self, a_layout, b_layout):
a_layout = self.layouts_3d[a_layout]
b_layout = self.layouts_2d[b_layout]
a_numpy = np.random.uniform(size=[2, 2, 4])
b_numpy = np.random.uniform(size=[4, 6])
a = constant_op.constant(a_numpy, dtype=dtypes.float32) # 2x2x4
b = constant_op.constant(b_numpy, dtype=dtypes.float32) # 4x6
# math_ops.matmul should emit a BatchMatMulV2 op here.
expected = math_ops.matmul(a, b)
a = api.relayout(a, a_layout)
b = api.relayout(b, b_layout)
dtensor_result = math_ops.matmul(a, b)
expected_layout = self._merge_layouts_for_matmul(a_layout, b_layout, False,
False)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
@parameterized.named_parameters(('_positive_axis_negative_batch', 0, -1),
('_negative_axis_positive_batch', -2, 0))
def testGather(self, axis, batch_dims):
params = np.arange(1000 * 4).reshape((1000, 4))
# "batch" size = 2, num_indices = 3 per example
indices = np.random.randint(0, 1000, size=4 * 3).reshape((4, 3))
expected = array_ops.gather_v2(
params, indices, axis=axis, batch_dims=batch_dims)
params = api.relayout(params, layout=Layout.replicated(self.mesh, 2))
indices = api.relayout(
indices, Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=2)
)
dtensor_result = array_ops.gather_v2(
params, indices, axis=axis, batch_dims=batch_dims)
expected_layout = Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=3)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
def testResourceGather(self):
if self.mesh.use_xla_spmd():
self.skipTest('Variables not supported yet with DTensor Xla Spmd.')
params = np.arange(1000 * 4).reshape((1000, 4))
indices = np.random.randint(0, 1000, size=1000 * 3).reshape((1000, 3))
expected = array_ops.gather_v2(variables.Variable(params), indices)
params = api.relayout(params, layout=Layout.replicated(self.mesh, 2))
indices = api.relayout(
indices, Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=2)
)
dtensor_result = array_ops.gather_v2(d_variable.DVariable(params), indices)
expected_layout = Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=3)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
def testResourceGatherRaisesErrorWhenResourceZeroDimSharded(self):
if self.mesh.use_xla_spmd():
self.skipTest('Variables not supported yet with DTensor Xla Spmd.')
sharded_tensor = api.relayout(
np.arange(1000 * 4).reshape((1000, 4)),
layout=Layout.batch_sharded(self.mesh, _MESH_DIM_Y, 2),
)
# "batch" size = 2, num_indices = 3 per example
indices = api.copy_to_mesh(
np.random.randint(0, 1000, size=4 * 3).reshape((4, 3)),
Layout.replicated(self.mesh, rank=2))
with self.assertRaisesRegex(
errors_impl.UnknownError,
'DTensor does not support sharded 0th dimension for the resource tensor'
):
array_ops.gather_v2(d_variable.DVariable(sharded_tensor), indices)
def testUnsortedSegmentSum(self):
self.skipForDeviceType(['TPU'], 'waiting for cl/344197900')
num_segments = 12
data = np.random.uniform(size=[num_segments, 4])
segment_ids = np.random.randint(0, num_segments, size=num_segments)
expected = gen_math_ops.unsorted_segment_sum(data, segment_ids,
num_segments)
data = api.relayout(data, Layout.replicated(self.mesh, 2))
segment_ids = api.relayout(
segment_ids, Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=1)
)
with api.default_mesh(self.mesh):
dtensor_result = gen_math_ops.unsorted_segment_sum(
data, segment_ids, num_segments)
expected_layout = Layout.replicated(self.mesh, 2)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
def testUnsortedSegmentSumWithFullyShardedIndices(self):
self.skipForDeviceType(['TPU'], 'waiting for cl/344197900')
num_segments = 8
data = np.random.uniform(size=[2, 4, 3])
segment_ids = np.random.randint(0, num_segments, size=[2, 4])
expected = gen_math_ops.unsorted_segment_sum(data, segment_ids,
num_segments)
data = api.relayout(data, Layout.replicated(self.mesh, 3))
segment_ids = api.relayout(
segment_ids, Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
)
with api.default_mesh(self.mesh):
dtensor_result = gen_math_ops.unsorted_segment_sum(
data, segment_ids, num_segments)
expected_layout = Layout.replicated(self.mesh, 2)
self.assertDTensorEqual(expected, expected_layout, dtensor_result)
@parameterized.named_parameters(
('_same_rank', [2, 2]),
('_adding_one_rank', [2, 2, 1]),
('_adding_one_rank_and_broadcasting', [2, 2, 2]),
)
def testBroadcastOpsWithFullyReplicatedInputs(self, new_shape):
op = gen_array_ops.broadcast_to
a = constant_op.constant([[1.], [3.]])
assert a.shape == [2, 1]
expected_result = op(a, new_shape)
a = api.copy_to_mesh(a, self.replicated_layout_2d)
dtensor_result = op(a, new_shape)
self.assertDTensorEqual(expected_result,
Layout.replicated(self.mesh, len(new_shape)),
dtensor_result)
def testBooleanMask(self):
if self.mesh.use_xla_spmd():
self.skipTest('Boolean mask not supported yet with DTensor Xla Spmd.')
self.skipForDeviceType(['TPU'], 'int64 XlaAllReduce not supported.')
for input_layout, expected_output_layout in [
(
self.first_dimension_sharded_layout,
self.first_dimension_sharded_layout_1d,
),
(
Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh),
self.first_dimension_sharded_layout_1d,
),
(self.last_dimension_sharded_layout, self.replicated_layout_1d),
(self.replicated_layout_2d, self.replicated_layout_1d),
]:
tensor = constant_op.constant(np.arange(8).reshape(2, 4))
mask = constant_op.constant(
np.array([True, True, False, False, True, False, True, True]).reshape(
2, 4
)
)
expected = array_ops.boolean_mask(tensor, mask)
tensor = api.relayout(tensor, input_layout)
mask = api.relayout(mask, input_layout)
@polymorphic_function.function
def boolean_mask_func(t, m):
return array_ops.boolean_mask(t, m)
result = boolean_mask_func(tensor, mask)
self.assertDTensorEqual(
expected, expected_output_layout.to_parted(), result
)
def testRawWhere(self):
if self.mesh.use_xla_spmd():
self.skipTest('Where op not supported yet with DTensor Xla Spmd.')
condition = constant_op.constant(
np.array([True, True, False, False, True, False, True, True])
)
condition = api.relayout(condition, self.first_dimension_sharded_layout_1d)
@polymorphic_function.function
def func(c):
return gen_array_ops.where(c)
result = func(condition)
# With parted layout, the raw where op will output local index instead of
# global index. So the second half of test expectation ([0], [2], [3]) has
# an offset of 4.
expected = constant_op.constant(
np.array([[0], [1], [0], [2], [3]]), dtype=dtypes.int64
)
self.assertDTensorEqual(
expected, self.first_dimension_sharded_layout.to_parted(), result
)
@parameterized.named_parameters([
{
'testcase_name': 'FullyReplicatedInputs',
'op': array_ops.where_v2,
'shard_type': 'replicated',
},
{
'testcase_name': 'BatchShardedInputs',
'op': array_ops.where_v2,
'shard_type': 'batch_sharded',
},
])
def testWhere(self, op, shard_type):
layout = (
self.replicated_layout_2d
if shard_type == 'replicated' else self.first_dimension_sharded_layout)
a = constant_op.constant([[True, False], [False, True]])
b = constant_op.constant([[10., 20.], [30., 40.]])
c = constant_op.constant([[50., 60.], [70., 80.]])
expected_result = op(a, b, c)
a = api.relayout(a, layout)
b = api.relayout(b, layout)
c = api.relayout(c, layout)
dtensor_result = op(a, b, c)
self.assertDTensorEqual(expected_result, layout, dtensor_result)
def testSqueezeOp(self):
t = array_ops.ones([1, 2, 1])
expected_result0 = array_ops.squeeze_v2(t)
expected_result1 = array_ops.squeeze_v2(t, axis=0)
expected_result2 = array_ops.squeeze_v2(t, axis=-1)
# t will have [1,1,1] as locally sharded shape, this covers the case that
# we should not squeeze the dim that's sharded.
t = api.relayout(
t,
Layout(
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED], self.mesh
),
)
dtensor_result0 = array_ops.squeeze_v2(t)
dtensor_result1 = array_ops.squeeze_v2(t, axis=0)
dtensor_result2 = array_ops.squeeze_v2(t, axis=-1)
self.assertDTensorEqual(expected_result0, Layout([_MESH_DIM_X], self.mesh),
dtensor_result0)
self.assertDTensorEqual(
expected_result1, Layout([_MESH_DIM_X, layout_lib.UNSHARDED],
self.mesh), dtensor_result1)
self.assertDTensorEqual(
expected_result2, Layout([layout_lib.UNSHARDED, _MESH_DIM_X],
self.mesh), dtensor_result2)
@parameterized.parameters(('replicated',), ('sharded',))
def testDiagPart(self, shard_type):
x = stateless_random_ops.stateless_random_uniform(
shape=(16, 16), seed=[0, 1])
expected = gen_array_ops.diag_part(input=x)
if shard_type == 'replicated':
layout = Layout([_MESH_DIM_X, _MESH_DIM_Y], self.mesh)
else:
layout = Layout.replicated(self.mesh, 2)
x = api.relayout(x, layout)
got = gen_array_ops.diag_part(input=x)
self.assertDTensorEqual(expected, Layout.replicated(self.mesh, 1), got)
@parameterized.product(
axis_dim=[-3, -2, -1, 0, 1, 2],
shard_type=['replicated', 'batch_sharded'],
reverse=[True, False])
def testCumSum(self, axis_dim, shard_type, reverse):
input_tensor = stateless_random_ops.stateless_random_uniform(
shape=(16, 16, 16), seed=[0, 1])
expected = math_ops.cumsum(x=input_tensor, axis=axis_dim, reverse=reverse)
if shard_type == 'replicated':
layout = Layout.replicated(self.mesh, rank=3)
expected_layout = layout
else:
layout = Layout.batch_sharded(self.mesh, batch_dim=_MESH_DIM_X, rank=3)
# Axis dimension should always be replicated, even on sharding dim.
if axis_dim in [-3, 0]:
expected_layout = Layout.replicated(self.mesh, rank=3)
else:
expected_layout = layout
input_tensor = api.relayout(input_tensor, layout)
got = math_ops.cumsum(x=input_tensor, axis=axis_dim, reverse=reverse)
self.assertDTensorEqual(expected, expected_layout, got)
@parameterized.named_parameters(('Sharded', 'sharded'),
('Replicated', 'replicated'))
def testStringFormat(self, shard_spec):
self.skipForDeviceType(['TPU'], 'StringFormat not supported on TPU.')
# TODO(b/303662238): See whether we can replicate soft placement here.
if test_util.use_multi_device_mode():
self.skipForDeviceType(['GPU'], 'StringFormat not supported on GPU.')
np.random.seed(123)
inputs = constant_op.constant(
np.random.normal(0.0, 1.0, 8 * 9 * 9).reshape([8, 9, 9, 1]),
dtype=dtypes.float32)
expected_result = gen_string_ops.string_format(inputs=[inputs])
if shard_spec == 'sharded':
layout = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=4)
else:
layout = Layout.replicated(self.mesh, rank=4)
inputs = api.relayout(inputs, layout)
got = gen_string_ops.string_format(inputs=[inputs])
# Manually compare instead of assertDTensorEqual since outputs are strings.
self.assertEqual(
api.fetch_layout(got), Layout.replicated(self.mesh, rank=0))
for got_tensor in api.unpack(got):
self.assertEqual(expected_result, got_tensor)
@parameterized.named_parameters(('Sharded', 'sharded'),
('Replicated', 'replicated'))
def testStringFormatOnTPURequiresCopyToMeshToCPU(self, shard_spec):
self.skipForDeviceType(['CPU', 'GPU'], 'Testing only for TPU.')
global_ids = test_util.create_device_ids_array((2, 4))
local_ids = np.ravel(global_ids).tolist()
tpu_mesh = Mesh([_MESH_DIM_X, _MESH_DIM_Y], global_ids, local_ids,
test_util.create_device_list((2, 4), 'TPU'))
cpu_mesh = Mesh([_MESH_DIM_X, _MESH_DIM_Y], global_ids, local_ids,
test_util.create_device_list((2, 4), 'CPU'))
cpu_layout = Layout.replicated(cpu_mesh, rank=4)
if shard_spec == 'sharded':
tpu_layout = Layout.batch_sharded(tpu_mesh, _MESH_DIM_X, rank=4)
else:
tpu_layout = Layout.replicated(tpu_mesh, rank=4)
inputs = stateless_random_ops.stateless_random_uniform(
shape=(8, 9, 9, 1), seed=[0, 1])
expected_result = gen_string_ops.string_format(inputs=[inputs])
inputs = api.relayout(inputs, tpu_layout)
# StringFormat is not supported on TPU, so copy_to_mesh to the CPU.
# Since we cannot eager copy_to_mesh from an input with non-replicated
# layout yet, relayout to replicated layout first, and then transfer to CPU.
inputs = api.copy_to_mesh(
api.relayout(inputs, Layout.replicated(tpu_mesh, rank=4)), cpu_layout)
got = gen_string_ops.string_format(inputs=[inputs])
# Manually compare instead of assertDTensorEqual since outputs are strings.
self.assertEqual(api.fetch_layout(got), Layout.replicated(cpu_mesh, rank=0))
for got_tensor in api.unpack(got):
self.assertEqual(expected_result, got_tensor)
@parameterized.named_parameters(
# TODO(feyu): to_hash_bucket and to_hash_bucket_strong are not defined
# in the tf MLIR dialect.
('ShardedFast', gen_string_ops.string_to_hash_bucket_fast, 'sharded'),
('ReplicatedFast', gen_string_ops.string_to_hash_bucket_fast,
'replicated'),
)
def testStringToHashBucket(self, to_hash_bucket_fn, shard_spec):
self.skipForDeviceType(
['GPU', 'TPU'],
'StringToHashBucket functions not supported on TPU or GPU.')
inputs = constant_op.constant(['a', 'b', 'c', 'd'], dtype=dtypes.string)
expected_result = to_hash_bucket_fn(inputs, num_buckets=32)
if shard_spec == 'sharded':
layout = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=1)
else:
layout = Layout.replicated(self.mesh, rank=1)
inputs = api.relayout(inputs, layout)
got = to_hash_bucket_fn(inputs, num_buckets=32)
self.assertDTensorEqual(expected_result, layout, got)
@parameterized.named_parameters(
{
'testcase_name': 'Replicated',
'shard_type': 'replicated',
}, {
'testcase_name': 'BatchSharded',
'shard_type': 'batch_sharded',
})
def testTensorListReserveSetAndGetRetrievesCorrectTensor(self, shard_type):
self.skipForDeviceType(['TPU', 'GPU'], 'Testing only for CPU.')
input_tensor = array_ops.ones([4, 4], dtype=dtypes.int32)
if shard_type == 'replicated':
layout = Layout.replicated(self.mesh, rank=2)
else:
layout = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=2)
@polymorphic_function.function
def f(input_tensor):
list_handle = gen_list_ops.tensor_list_reserve(
element_shape=constant_op.constant([4, 4], dtype=dtypes.int32),
num_elements=constant_op.constant(4, dtype=dtypes.int32),
element_dtype=dtypes.int32)
list_handle = gen_list_ops.tensor_list_set_item(
input_handle=list_handle,
index=constant_op.constant(0, dtype=dtypes.int32),
item=input_tensor)
return gen_list_ops.tensor_list_get_item(
input_handle=list_handle,
index=constant_op.constant(0, dtype=dtypes.int32),
element_shape=constant_op.constant([4, 4], dtype=dtypes.int32),
element_dtype=dtypes.int32)
got_tensor = f(api.relayout(input_tensor, layout))
self.assertDTensorEqual(input_tensor, Layout.replicated(self.mesh, rank=2),
got_tensor)
@parameterized.named_parameters(
('x_unsharded', [_MESH_DIM_X, layout_lib.UNSHARDED]),
('unsharded_x', [layout_lib.UNSHARDED, _MESH_DIM_X]),
('x_y', [_MESH_DIM_X, _MESH_DIM_Y]),
('unsharded_unsharded', [layout_lib.UNSHARDED, layout_lib.UNSHARDED]))
def testDisableCopyOnRead(self, sharding_specs):
self.skipForDeviceType(['TPU'], 'Op not supported on TPUs.')
def f(d_var):
gen_resource_variable_ops.disable_copy_on_read(resource=d_var.handle)
return d_var
layout = Layout(sharding_specs, self.mesh)
variable = d_variable.DVariable(
initial_value=api.relayout(
array_ops.ones([4, 8], dtype=dtypes.float32), layout
)
)
# Eager
self.assertEqual(api.fetch_layout(f(variable)), layout)
# Function
self.assertEqual(
api.fetch_layout(polymorphic_function.function(f)(variable)), layout)
def testShardedFilename(self):
self.skipForDeviceType(['TPU', 'GPU'],
'Strings only for CPUs, this is a host-only op.')
basename = constant_op.constant('dtensor-file')
shard = constant_op.constant(1, dtype=dtypes.int32)
num_shards = constant_op.constant(16, dtype=dtypes.int32)
layout = Layout.replicated(self.mesh, rank=0)
expected = gen_io_ops.sharded_filename(
basename=basename, shard=shard, num_shards=num_shards, name=None)
result = gen_io_ops.sharded_filename(
basename=api.relayout(basename, layout),
shard=api.relayout(shard, layout),
num_shards=api.relayout(num_shards, layout),
)
self.assertEqual(api.fetch_layout(result), layout)
for result_tensor in api.unpack(result):
self.assertEqual(expected, result_tensor)
@parameterized.named_parameters(*test_util.product(
(('_indices_unsharded', [layout_lib.UNSHARDED, layout_lib.UNSHARDED]),
('_indices_x', [_MESH_DIM_X, layout_lib.UNSHARDED])),
(('_updates_unsharded_unsharded',
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, layout_lib.UNSHARDED]),
('_updates_x_unsharded',
[layout_lib.UNSHARDED, _MESH_DIM_X, layout_lib.UNSHARDED]),
('_updates_unsharded_y',
[layout_lib.UNSHARDED, layout_lib.UNSHARDED, _MESH_DIM_Y]),
('_updates_x_y', [layout_lib.UNSHARDED, _MESH_DIM_X, _MESH_DIM_Y]))))
def testScatterNd(self, indices_spec, updates_spec):
indices_layout = Layout(indices_spec, self.mesh)
updates_layout = Layout(updates_spec, self.mesh)
indices = constant_op.constant([[0], [15]])
updates = constant_op.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7],
[8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7],
[8, 8, 8, 8]]])
shape = [16, 4, 4]
expected_result = gen_array_ops.scatter_nd(indices, updates, shape)
got_result = gen_array_ops.scatter_nd(
api.relayout(indices, indices_layout),
api.relayout(updates, updates_layout),
shape,
)
self.assertDTensorEqual(expected_result, updates_layout, got_result)
| DTensorSPMDTest |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/supporting_types.py | {
"start": 4912,
"end": 5015
} | class ____(TypedDict):
message: str
passed: bool
@dataclass
| ExpectationDiagnosticCheckMessageDict |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F841_0.py | {
"start": 2779,
"end": 2873
} | class ____:
class B:
def set_class(self, cls):
__class__ = cls # F841
| A |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937.py | {
"start": 11929,
"end": 13268
} | class ____:
def _create_rng(self):
seed = 1234567890
rg = Generator(MT19937(seed))
bit_generator = rg.bit_generator
state = bit_generator.state
legacy_state = (state['bit_generator'],
state['state']['key'],
state['state']['pos'])
return rg, bit_generator, state
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
rg, bit_generator, state = self._create_rng()
old = rg.standard_normal(size=3)
bit_generator.state = state
new = rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
rg, bit_generator, state = self._create_rng()
rg.standard_normal()
state = bit_generator.state
old = rg.standard_normal(size=3)
bit_generator.state = state
new = rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
rg, _, _ = self._create_rng()
rg.negative_binomial(0.5, 0.5)
| TestSetState |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 101007,
"end": 103110
} | class ____(Response):
"""
Response of events.vector_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "vector_metrics_iter_histogram"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(VectorMetricsIterHistogramResponse, self).__init__(**kwargs)
self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_images = None
return
self.assert_isinstance(value, "images", (list, tuple))
self.assert_isinstance(value, "images", (dict,), is_array=True)
self._property_images = value
response_mapping = {
AddRequest: AddResponse,
AddBatchRequest: AddBatchResponse,
DeleteForTaskRequest: DeleteForTaskResponse,
DebugImagesRequest: DebugImagesResponse,
GetTaskMetricsRequest: GetTaskMetricsResponse,
GetTaskLogRequest: GetTaskLogResponse,
GetTaskEventsRequest: GetTaskEventsResponse,
DownloadTaskLogRequest: DownloadTaskLogResponse,
GetTaskPlotsRequest: GetTaskPlotsResponse,
GetMultiTaskPlotsRequest: GetMultiTaskPlotsResponse,
GetVectorMetricsAndVariantsRequest: GetVectorMetricsAndVariantsResponse,
VectorMetricsIterHistogramRequest: VectorMetricsIterHistogramResponse,
ScalarMetricsIterHistogramRequest: ScalarMetricsIterHistogramResponse,
MultiTaskScalarMetricsIterHistogramRequest: MultiTaskScalarMetricsIterHistogramResponse,
GetTaskLatestScalarValuesRequest: GetTaskLatestScalarValuesResponse,
GetScalarMetricsAndVariantsRequest: GetScalarMetricsAndVariantsResponse,
GetScalarMetricDataRequest: GetScalarMetricDataResponse,
}
| VectorMetricsIterHistogramResponse |
python | ray-project__ray | python/ray/autoscaler/_private/node_tracker.py | {
"start": 85,
"end": 2744
} | class ____:
"""Map nodes to their corresponding logs.
We need to be a little careful here. At an given point in time, node_id <->
ip can be interchangeably used, but the node_id -> ip relation is not
bijective _across time_ since IP addresses can be reused. Therefore, we
should treat node_id as the only unique identifier.
"""
def __init__(self):
# Mapping from node_id -> (ip, node type, stdout_path, process runner)
self.node_mapping = {}
# A quick, inefficient FIFO cache implementation.
self.lru_order = []
def _add_node_mapping(self, node_id: str, value: str):
if node_id in self.node_mapping:
return
assert len(self.lru_order) == len(self.node_mapping)
if len(self.lru_order) >= constants.AUTOSCALER_MAX_NODES_TRACKED:
# The LRU eviction case
node_id = self.lru_order.pop(0)
del self.node_mapping[node_id]
self.node_mapping[node_id] = value
self.lru_order.append(node_id)
def track(self, node_id: str, ip: str, node_type: str):
"""
Begin to track a new node.
Args:
node_id: The node id.
ip: The node ip address.
node_type: The node type.
"""
if node_id not in self.node_mapping:
self._add_node_mapping(node_id, (ip, node_type))
def untrack(self, node_id: str):
"""Gracefully stop tracking a node. If a node is intentionally removed from
the cluster, we should stop tracking it so we don't mistakenly mark it
as failed.
Args:
node_id: The node id which failed.
"""
if node_id in self.node_mapping:
self.lru_order.remove(node_id)
del self.node_mapping[node_id]
def get_all_failed_node_info(
self, non_failed_ids: Set[str]
) -> List[Tuple[str, str]]:
"""Get the information about all failed nodes. A failed node is any node which
we began to track that is not pending or alive (i.e. not failed).
Args:
non_failed_ids: Nodes are failed unless they are in this set.
Returns:
List[Tuple[str, str]]: A list of tuples. Each tuple is the ip
address and type of a failed node.
"""
failed_nodes = self.node_mapping.keys() - non_failed_ids
failed_info = []
# Returning the list in order is important for display purposes.
for node_id in filter(lambda node_id: node_id in failed_nodes, self.lru_order):
failed_info.append(self.node_mapping[node_id])
return failed_info
| NodeTracker |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/initial_models.py | {
"start": 5205,
"end": 6133
} | class ____(Base):
"""
DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table.
"""
__tablename__ = "tags"
key = Column(String(250))
"""
Tag key: `String` (limit 250 characters). *Primary Key* for ``tags`` table.
"""
value = Column(String(250), nullable=True)
"""
Value associated with tag: `String` (limit 250 characters). Could be *null*.
"""
run_uuid = Column(String(32), ForeignKey("runs.run_uuid"))
"""
Run UUID to which this tag belongs to: *Foreign Key* into ``runs`` table.
"""
run = relationship("SqlRun", backref=backref("tags", cascade="all"))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
__table_args__ = (PrimaryKeyConstraint("key", "run_uuid", name="tag_pk"),)
def __repr__(self):
return f"<SqlRunTag({self.key}, {self.value})>"
| SqlTag |
python | marshmallow-code__marshmallow | tests/test_decorators.py | {
"start": 13228,
"end": 30022
} | class ____:
def test_validator_nested_many_invalid_data(self):
class NestedSchema(Schema):
foo = fields.Int(required=True)
class MySchema(Schema):
nested = fields.Nested(NestedSchema, required=True, many=True)
schema = MySchema()
errors = schema.validate({"nested": [1]})
assert errors
assert "nested" in errors
assert 0 in errors["nested"]
assert errors["nested"][0] == {"_schema": ["Invalid input type."]}
def test_validator_nested_many_schema_error(self):
class NestedSchema(Schema):
foo = fields.Int(required=True)
@validates_schema
def validate_schema(self, data, **kwargs):
raise ValidationError("This will never work.")
class MySchema(Schema):
nested = fields.Nested(NestedSchema, required=True, many=True)
schema = MySchema()
errors = schema.validate({"nested": [{"foo": 1}]})
assert errors
assert "nested" in errors
assert 0 in errors["nested"]
assert errors["nested"][0] == {"_schema": ["This will never work."]}
def test_validator_nested_many_field_error(self):
class NestedSchema(Schema):
foo = fields.Int(required=True)
@validates_schema
def validate_schema(self, data, **kwargs):
raise ValidationError("This will never work.", "foo")
class MySchema(Schema):
nested = fields.Nested(NestedSchema, required=True, many=True)
schema = MySchema()
errors = schema.validate({"nested": [{"foo": 1}]})
assert errors
assert "nested" in errors
assert 0 in errors["nested"]
assert errors["nested"][0] == {"foo": ["This will never work."]}
@pytest.mark.parametrize("data", ([{"foo": 1, "bar": 2}],))
@pytest.mark.parametrize(
("pass_collection", "expected_data", "expected_original_data"),
(
[True, [{"foo": 1}], [{"foo": 1, "bar": 2}]],
[False, {"foo": 1}, {"foo": 1, "bar": 2}],
),
)
def test_validator_nested_many_pass_original_and_pass_collection(
self, pass_collection, data, expected_data, expected_original_data
):
class NestedSchema(Schema):
foo = fields.Int(required=True)
@validates_schema(pass_collection=pass_collection, pass_original=True)
def validate_schema(self, data, original_data, many, **kwargs):
assert data == expected_data
assert original_data == expected_original_data
assert many is True
raise ValidationError("Method called")
class MySchema(Schema):
nested = fields.Nested(
NestedSchema, required=True, many=True, unknown=EXCLUDE
)
schema = MySchema()
errors = schema.validate({"nested": data})
error = errors["nested"] if pass_collection else errors["nested"][0]
assert error["_schema"][0] == "Method called"
def test_decorated_validators(self):
class MySchema(Schema):
foo = fields.Int()
bar = fields.Int()
@validates_schema
def validate_schema(self, data, **kwargs):
if data["foo"] <= 3:
raise ValidationError("Must be greater than 3")
@validates_schema(pass_collection=True)
def validate_raw(self, data, many, **kwargs):
if many:
assert type(data) is list
if len(data) < 2:
raise ValidationError("Must provide at least 2 items")
@validates_schema
def validate_bar(self, data, **kwargs):
if "bar" in data and data["bar"] < 0:
raise ValidationError("bar must not be negative", "bar")
schema = MySchema()
errors = schema.validate({"foo": 3})
assert "_schema" in errors
assert errors["_schema"][0] == "Must be greater than 3"
errors = schema.validate([{"foo": 4}], many=True)
assert "_schema" in errors
assert len(errors["_schema"]) == 1
assert errors["_schema"][0] == "Must provide at least 2 items"
errors = schema.validate({"foo": 4, "bar": -1})
assert "bar" in errors
assert len(errors["bar"]) == 1
assert errors["bar"][0] == "bar must not be negative"
def test_multiple_validators(self):
class MySchema(Schema):
foo = fields.Int()
bar = fields.Int()
@validates_schema
def validate_schema(self, data, **kwargs):
if data["foo"] <= 3:
raise ValidationError("Must be greater than 3")
@validates_schema
def validate_bar(self, data, **kwargs):
if "bar" in data and data["bar"] < 0:
raise ValidationError("bar must not be negative")
schema = MySchema()
errors = schema.validate({"foo": 3, "bar": -1})
assert type(errors) is dict
assert "_schema" in errors
assert len(errors["_schema"]) == 2
assert "Must be greater than 3" in errors["_schema"]
assert "bar must not be negative" in errors["_schema"]
errors = schema.validate([{"foo": 3, "bar": -1}, {"foo": 3}], many=True)
assert type(errors) is dict
assert "_schema" in errors[0]
assert len(errors[0]["_schema"]) == 2
assert "Must be greater than 3" in errors[0]["_schema"]
assert "bar must not be negative" in errors[0]["_schema"]
assert len(errors[1]["_schema"]) == 1
assert "Must be greater than 3" in errors[0]["_schema"]
def test_multiple_validators_merge_dict_errors(self):
class NestedSchema(Schema):
foo = fields.Int()
bar = fields.Int()
class MySchema(Schema):
nested = fields.Nested(NestedSchema)
@validates_schema
def validate_nested_foo(self, data, **kwargs):
raise ValidationError({"nested": {"foo": ["Invalid foo"]}})
@validates_schema
def validate_nested_bar_1(self, data, **kwargs):
raise ValidationError({"nested": {"bar": ["Invalid bar 1"]}})
@validates_schema
def validate_nested_bar_2(self, data, **kwargs):
raise ValidationError({"nested": {"bar": ["Invalid bar 2"]}})
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"nested": {"foo": 1, "bar": 2}})
assert excinfo.value.messages == {
"nested": {
"foo": ["Invalid foo"],
"bar": ["Invalid bar 1", "Invalid bar 2"],
}
}
def test_passing_original_data(self):
class MySchema(Schema):
foo = fields.Int()
bar = fields.Int()
@validates_schema(pass_original=True)
def validate_original(self, data, original_data, partial, **kwargs):
if isinstance(original_data, dict) and isinstance(
original_data["foo"], str
):
raise ValidationError("foo cannot be a string")
@validates_schema(pass_collection=True, pass_original=True)
def validate_original_bar(self, data, original_data, many, **kwargs):
def check(datum):
if isinstance(datum, dict) and isinstance(datum["bar"], str):
raise ValidationError("bar cannot be a string")
if many:
for each in original_data:
check(each)
else:
check(original_data)
schema = MySchema()
errors = schema.validate({"foo": "4", "bar": 12})
assert errors["_schema"] == ["foo cannot be a string"]
errors = schema.validate({"foo": 4, "bar": "42"})
assert errors["_schema"] == ["bar cannot be a string"]
errors = schema.validate([{"foo": 4, "bar": "42"}], many=True)
assert errors["_schema"] == ["bar cannot be a string"]
def test_allow_reporting_field_errors_in_schema_validator(self):
class NestedSchema(Schema):
baz = fields.Int(required=True)
class MySchema(Schema):
foo = fields.Int(required=True)
bar = fields.Nested(NestedSchema, required=True)
bam = fields.Int(required=True)
@validates_schema(skip_on_field_errors=True)
def consistency_validation(self, data, **kwargs):
errors: dict[str, str | dict] = {}
if data["bar"]["baz"] != data["foo"]:
errors["bar"] = {"baz": "Non-matching value"}
if data["bam"] > data["foo"]:
errors["bam"] = "Value should be less than foo"
if errors:
raise ValidationError(errors)
schema = MySchema()
errors = schema.validate({"foo": 2, "bar": {"baz": 5}, "bam": 6})
assert errors["bar"]["baz"] == "Non-matching value"
assert errors["bam"] == "Value should be less than foo"
# https://github.com/marshmallow-code/marshmallow/issues/273
def test_allow_arbitrary_field_names_in_error(self):
class MySchema(Schema):
@validates_schema
def validator(self, data, **kwargs):
raise ValidationError("Error message", "arbitrary_key")
errors = MySchema().validate({})
assert errors["arbitrary_key"] == ["Error message"]
def test_skip_on_field_errors(self):
class MySchema(Schema):
foo = fields.Int(required=True, validate=validate.Equal(3))
bar = fields.Int(required=True)
@validates_schema(skip_on_field_errors=True)
def validate_schema(self, data, **kwargs):
if data["foo"] != data["bar"]:
raise ValidationError("Foo and bar must be equal.")
@validates_schema(skip_on_field_errors=True, pass_collection=True)
def validate_many(self, data, many, **kwargs):
if many:
assert type(data) is list
if len(data) < 2:
raise ValidationError("Must provide at least 2 items")
schema = MySchema()
# check that schema errors still occur with no field errors
errors = schema.validate({"foo": 3, "bar": 4})
assert "_schema" in errors
assert errors["_schema"][0] == "Foo and bar must be equal."
errors = schema.validate([{"foo": 3, "bar": 3}], many=True)
assert "_schema" in errors
assert errors["_schema"][0] == "Must provide at least 2 items"
# check that schema errors don't occur when field errors do
errors = schema.validate({"foo": 3, "bar": "not an int"})
assert "bar" in errors
assert "_schema" not in errors
errors = schema.validate({"foo": 2, "bar": 2})
assert "foo" in errors
assert "_schema" not in errors
errors = schema.validate([{"foo": 3, "bar": "not an int"}], many=True)
assert "bar" in errors[0]
assert "_schema" not in errors
# https://github.com/marshmallow-code/marshmallow/issues/2170
def test_data_key_is_used_in_errors_dict(self):
class MySchema(Schema):
foo = fields.Int(data_key="fooKey")
@validates("foo")
def validate_foo(self, value, **kwargs):
raise ValidationError("from validates")
@validates_schema(skip_on_field_errors=False)
def validate_schema(self, data, **kwargs):
raise ValidationError("from validates_schema str", field_name="foo")
@validates_schema(skip_on_field_errors=False)
def validate_schema2(self, data, **kwargs):
raise ValidationError({"fooKey": "from validates_schema dict"})
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"fooKey": 42})
exc = excinfo.value
assert exc.messages == {
"fooKey": [
"from validates",
"from validates_schema str",
"from validates_schema dict",
]
}
def test_decorator_error_handling():
class ExampleSchema(Schema):
foo = fields.Int()
bar = fields.Int()
@pre_load()
def pre_load_error1(self, item, **kwargs):
if item["foo"] != 0:
return item
errors = {"foo": ["preloadmsg1"], "bar": ["preloadmsg2", "preloadmsg3"]}
raise ValidationError(errors)
@pre_load()
def pre_load_error2(self, item, **kwargs):
if item["foo"] != 4:
return item
raise ValidationError("preloadmsg1", "foo")
@pre_load()
def pre_load_error3(self, item, **kwargs):
if item["foo"] != 8:
return item
raise ValidationError("preloadmsg1")
@post_load()
def post_load_error1(self, item, **kwargs):
if item["foo"] != 1:
return item
errors = {"foo": ["postloadmsg1"], "bar": ["postloadmsg2", "postloadmsg3"]}
raise ValidationError(errors)
@post_load()
def post_load_error2(self, item, **kwargs):
if item["foo"] != 5:
return item
raise ValidationError("postloadmsg1", "foo")
def make_item(foo, bar):
data = schema.load({"foo": foo, "bar": bar})
assert data is not None
return data
schema = ExampleSchema()
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 0, "bar": 1})
errors = excinfo.value.messages
assert "foo" in errors
assert len(errors["foo"]) == 1
assert errors["foo"][0] == "preloadmsg1"
assert "bar" in errors
assert len(errors["bar"]) == 2
assert "preloadmsg2" in errors["bar"]
assert "preloadmsg3" in errors["bar"]
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 1, "bar": 1})
errors = excinfo.value.messages
assert "foo" in errors
assert len(errors["foo"]) == 1
assert errors["foo"][0] == "postloadmsg1"
assert "bar" in errors
assert len(errors["bar"]) == 2
assert "postloadmsg2" in errors["bar"]
assert "postloadmsg3" in errors["bar"]
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 4, "bar": 1})
errors = excinfo.value.messages
assert len(errors) == 1
assert "foo" in errors
assert len(errors["foo"]) == 1
assert errors["foo"][0] == "preloadmsg1"
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 5, "bar": 1})
errors = excinfo.value.messages
assert len(errors) == 1
assert "foo" in errors
assert len(errors["foo"]) == 1
assert errors["foo"][0] == "postloadmsg1"
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 8, "bar": 1})
errors = excinfo.value.messages
assert len(errors) == 1
assert "_schema" in errors
assert len(errors["_schema"]) == 1
assert errors["_schema"][0] == "preloadmsg1"
@pytest.mark.parametrize("decorator", [pre_load, post_load])
def test_decorator_error_handling_with_load(decorator):
class ExampleSchema(Schema):
@decorator
def raise_value_error(self, item, **kwargs):
raise ValidationError({"foo": "error"})
schema = ExampleSchema()
with pytest.raises(ValidationError) as exc:
schema.load({})
assert exc.value.messages == {"foo": "error"}
schema.dump(object())
@pytest.mark.parametrize("decorator", [pre_load, post_load])
def test_decorator_error_handling_with_load_dict_error(decorator):
class ExampleSchema(Schema):
@decorator
def raise_value_error(self, item, **kwargs):
raise ValidationError({"foo": "error"}, "nested_field")
schema = ExampleSchema()
with pytest.raises(ValidationError) as exc:
schema.load({})
assert exc.value.messages == {"nested_field": {"foo": "error"}}
schema.dump(object())
@pytest.mark.parametrize("decorator", [pre_dump, post_dump])
def test_decorator_error_handling_with_dump(decorator):
class ExampleSchema(Schema):
@decorator
def raise_value_error(self, item, **kwargs):
raise ValidationError({"foo": "error"})
schema = ExampleSchema()
with pytest.raises(ValidationError) as exc:
schema.dump(object())
assert exc.value.messages == {"foo": "error"}
schema.load({})
| TestValidatesSchemaDecorator |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/config_types.py | {
"start": 11082,
"end": 12428
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneConfigType,)
name = "EnumConfigType"
values = non_null_list(GrapheneEnumConfigValue)
given_name = graphene.NonNull(graphene.String)
def __init__(
self,
get_config_type: Callable[[str], ConfigTypeSnap],
config_type_snap: ConfigTypeSnap,
):
self._config_type_snap = check.inst_param(
config_type_snap, "config_type_snap", ConfigTypeSnap
)
self._get_config_type = get_config_type
super().__init__(**_ctor_kwargs_for_snap(config_type_snap))
def resolve_recursive_config_types(
self, graphene_info: ResolveInfo
) -> list[GrapheneConfigTypeUnion]:
return [
to_config_type(self._get_config_type, config_type_key)
for config_type_key in _recursive_config_type_keys(
self._get_config_type, self._config_type_snap
)
]
def resolve_values(self, _graphene_info: ResolveInfo) -> list[GrapheneEnumConfigValue]:
return [
GrapheneEnumConfigValue(value=ev.value, description=ev.description)
for ev in check.not_none(self._config_type_snap.enum_values)
]
def resolve_given_name(self, _):
return self._config_type_snap.given_name
| GrapheneEnumConfigType |
python | openai__openai-python | src/openai/types/realtime/realtime_response_usage_input_token_details.py | {
"start": 228,
"end": 613
} | class ____(BaseModel):
audio_tokens: Optional[int] = None
"""The number of cached audio tokens used as input for the Response."""
image_tokens: Optional[int] = None
"""The number of cached image tokens used as input for the Response."""
text_tokens: Optional[int] = None
"""The number of cached text tokens used as input for the Response."""
| CachedTokensDetails |
python | docker__docker-py | docker/errors.py | {
"start": 5169,
"end": 5340
} | class ____(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context '{self.name}' not found")
| ContextNotFound |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 29829,
"end": 31829
} | class ____(RegexLexer):
"""
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
`GenshiLexer`.
"""
flags = re.DOTALL
tokens = {
'root': [
(r'[^<$]+', Other),
(r'(<\?python)(.*?)(\?>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
(r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
include('variable'),
(r'[<$]', Other),
],
'pytag': [
(r'\s+', Text),
(r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'pyattr': [
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
(r'[^\s>]+', String, '#pop'),
],
'tag': [
(r'\s+', Text),
(r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
(r'[\w:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('"', String, 'attr-dstring'),
("'", String, 'attr-sstring'),
(r'[^\s>]*', String, '#pop')
],
'attr-dstring': [
('"', String, '#pop'),
include('strings'),
("'", String)
],
'attr-sstring': [
("'", String, '#pop'),
include('strings'),
("'", String)
],
'strings': [
('[^"\'$]+', String),
include('variable')
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
Name.Variable),
]
}
| GenshiMarkupLexer |
python | cherrypy__cherrypy | cherrypy/test/test_encoding.py | {
"start": 412,
"end": 18947
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self, param):
assert param == europoundUnicode, '%r != %r' % (
param,
europoundUnicode,
)
yield europoundUnicode
@cherrypy.expose
def mao_zedong(self):
return sing
@cherrypy.expose
@cherrypy.config(**{'tools.encode.encoding': 'utf-8'})
def utf8(self):
return sing8
@cherrypy.expose
def cookies_and_headers(self):
# if the headers have non-ascii characters and a cookie has
# any part which is unicode (even ascii), the response
# should not fail.
cherrypy.response.cookie['candy'] = 'bar'
cherrypy.response.cookie['candy']['domain'] = 'cherrypy.dev'
cherrypy.response.headers['Some-Header'] = (
'My d\xc3\xb6g has fleas'
)
cherrypy.response.headers['Bytes-Header'] = (
b'Bytes given header'
)
return 'Any content'
@cherrypy.expose
def reqparams(self, *args, **kwargs):
return b', '.join(
[
': '.join((k, v)).encode('utf8')
for k, v in sorted(cherrypy.request.params.items())
],
)
@cherrypy.expose
@cherrypy.config(
**{
'tools.encode.text_only': False,
'tools.encode.add_charset': True,
},
)
def nontext(self, *args, **kwargs):
cherrypy.response.headers['Content-Type'] = (
'application/binary'
)
return '\x00\x01\x02\x03'
class GZIP:
@cherrypy.expose
def index(self):
yield 'Hello, world'
@cherrypy.expose
# Turn encoding off so the gzip tool is the one doing the collapse.
@cherrypy.config(**{'tools.encode.on': False})
def noshow(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield 'Here be dragons'
@cherrypy.expose
@cherrypy.config(**{'response.stream': True})
def noshow_stream(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield 'Here be dragons'
class Decode:
@cherrypy.expose
@cherrypy.config(
**{
'tools.decode.on': True,
'tools.decode.default_encoding': ['utf-16'],
},
)
def extra_charset(self, *args, **kwargs):
return ', '.join(
[
': '.join((k, v))
for k, v in cherrypy.request.params.items()
],
)
@cherrypy.expose
@cherrypy.config(
**{
'tools.decode.on': True,
'tools.decode.encoding': 'utf-16',
},
)
def force_charset(self, *args, **kwargs):
return ', '.join(
[
': '.join((k, v))
for k, v in cherrypy.request.params.items()
],
)
root = Root()
root.gzip = GZIP()
root.decode = Decode()
cherrypy.tree.mount(root, config={'/gzip': {'tools.gzip.on': True}})
def test_query_string_decoding(self):
URI_TMPL = '/reqparams?q={q}'
europoundUtf8_2_bytes = europoundUnicode.encode('utf-8')
europoundUtf8_2nd_byte = europoundUtf8_2_bytes[1:2]
# Encoded utf8 query strings MUST be parsed correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX
self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2_bytes)))
# The return value will be encoded as utf8.
self.assertBody(b'q: ' + europoundUtf8_2_bytes)
# Query strings that are incorrectly encoded MUST raise 404.
# Here, q is the second byte of POUND SIGN U+A3 encoded in utf8
# and then %HEX
# TODO: check whether this shouldn't raise 400 Bad Request instead
self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2nd_byte)))
self.assertStatus(404)
self.assertErrorPage(
404,
'The given query string could not be processed. Query '
"strings for this resource must be encoded with 'utf8'.",
)
def test_urlencoded_decoding(self):
# Test the decoding of an application/x-www-form-urlencoded entity.
europoundUtf8 = europoundUnicode.encode('utf-8')
body = b'param=' + europoundUtf8
(
self.getPage(
'/',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(europoundUtf8)
# Encoded utf8 entities MUST be parsed and decoded correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8
body = b'q=\xc2\xa3'
(
self.getPage(
'/reqparams',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'q: \xc2\xa3')
# ...and in utf16, which is not in the default attempt_charsets list:
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
(
self.getPage(
'/reqparams',
method='POST',
headers=[
(
'Content-Type',
'application/x-www-form-urlencoded;charset=utf-16',
),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'q: \xc2\xa3')
# Entities that are incorrectly encoded MUST raise 400.
# Here, q is the POUND SIGN U+00A3 encoded in utf16, but
# the Content-Type incorrectly labels it utf-8.
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
(
self.getPage(
'/reqparams',
method='POST',
headers=[
(
'Content-Type',
'application/x-www-form-urlencoded;charset=utf-8',
),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertStatus(400)
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['utf-8']",
)
def test_decode_tool(self):
# An extra charset should be tried first, and succeed if it matches.
# Here, we add utf-16 as a charset and pass a utf-16 body.
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
(
self.getPage(
'/decode/extra_charset',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'q: \xc2\xa3')
# An extra charset should be tried first, and continue to other default
# charsets if it doesn't match.
# Here, we add utf-16 as a charset but still pass a utf-8 body.
body = b'q=\xc2\xa3'
(
self.getPage(
'/decode/extra_charset',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'q: \xc2\xa3')
# An extra charset should error if force is True and it doesn't match.
# Here, we force utf-16 as a charset but still pass a utf-8 body.
body = b'q=\xc2\xa3'
(
self.getPage(
'/decode/force_charset',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['utf-16']",
)
def test_multipart_decoding(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# explicitly given.
body = ntob(
'\r\n'.join(
[
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--',
],
),
)
(
self.getPage(
'/reqparams',
method='POST',
headers=[
('Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'submit: Create, text: ab\xe2\x80\x9cc')
@mock.patch('cherrypy._cpreqbody.Part.maxrambytes', 1)
def test_multipart_decoding_bigger_maxrambytes(self):
"""
Decoding of a multipart entity should also pass when
the entity is bigger than maxrambytes. See ticket #1352.
"""
self.test_multipart_decoding()
def test_multipart_decoding_no_charset(self):
# Test the decoding of a multipart entity when the charset (utf8) is
# NOT explicitly given, but is in the list of charsets to attempt.
body = ntob(
'\r\n'.join(
[
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xe2\x80\x9c',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'Create',
'--X--',
],
),
)
(
self.getPage(
'/reqparams',
method='POST',
headers=[
('Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertBody(b'submit: Create, text: \xe2\x80\x9c')
def test_multipart_decoding_no_successful_charset(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# NOT explicitly given, and is NOT in the list of charsets to attempt.
body = ntob(
'\r\n'.join(
[
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--',
],
),
)
(
self.getPage(
'/reqparams',
method='POST',
headers=[
('Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body,
),
)
self.assertStatus(400)
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['us-ascii', 'utf-8']",
)
def test_nontext(self):
self.getPage('/nontext')
self.assertHeader('Content-Type', 'application/binary;charset=utf-8')
self.assertBody('\x00\x01\x02\x03')
def testEncoding(self):
# Default encoding should be utf-8
self.getPage('/mao_zedong')
self.assertBody(sing8)
# Ask for utf-16.
self.getPage('/mao_zedong', [('Accept-Charset', 'utf-16')])
self.assertHeader('Content-Type', 'text/html;charset=utf-16')
self.assertBody(sing16)
# Ask for multiple encodings. ISO-8859-1 should fail, and utf-16
# should be produced.
self.getPage(
'/mao_zedong',
[('Accept-Charset', 'iso-8859-1;q=1, utf-16;q=0.5')],
)
self.assertBody(sing16)
# The "*" value should default to our default_encoding, utf-8
self.getPage('/mao_zedong', [('Accept-Charset', '*;q=1, utf-7;q=.2')])
self.assertBody(sing8)
# Only allow iso-8859-1, which should fail and raise 406.
self.getPage('/mao_zedong', [('Accept-Charset', 'iso-8859-1, *;q=0')])
self.assertStatus('406 Not Acceptable')
self.assertInBody(
'Your client sent this Accept-Charset header: '
'iso-8859-1, *;q=0. We tried these charsets: '
'iso-8859-1.',
)
# Ask for x-mac-ce, which should be unknown. See ticket #569.
self.getPage(
'/mao_zedong',
[('Accept-Charset', 'us-ascii, ISO-8859-1, x-mac-ce')],
)
self.assertStatus('406 Not Acceptable')
self.assertInBody(
'Your client sent this Accept-Charset header: '
'us-ascii, ISO-8859-1, x-mac-ce. We tried these '
'charsets: ISO-8859-1, us-ascii, x-mac-ce.',
)
# Test the 'encoding' arg to encode.
self.getPage('/utf8')
self.assertBody(sing8)
self.getPage('/utf8', [('Accept-Charset', 'us-ascii, ISO-8859-1')])
self.assertStatus('406 Not Acceptable')
# Test malformed quality value, which should raise 400.
self.getPage(
'/mao_zedong',
[('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7)')],
)
self.assertStatus('400 Bad Request')
def testGzip(self):
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(b'Hello, world')
zfile.close()
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(zbuf.getvalue()[:3])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertHeader('Content-Encoding', 'gzip')
# Test when gzip is denied.
self.getPage('/gzip/', headers=[('Accept-Encoding', 'identity')])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertNoHeader('Content-Encoding')
self.assertBody('Hello, world')
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip;q=0')])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertNoHeader('Content-Encoding')
self.assertBody('Hello, world')
# Test that trailing comma doesn't cause IndexError
# Ref: https://github.com/cherrypy/cherrypy/issues/988
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip,deflate,')])
self.assertStatus(200)
self.assertNotInBody('IndexError')
self.getPage('/gzip/', headers=[('Accept-Encoding', '*;q=0')])
self.assertStatus(406)
self.assertNoHeader('Content-Encoding')
self.assertErrorPage(406, 'identity, gzip')
# Test for ticket #147
self.getPage('/gzip/noshow', headers=[('Accept-Encoding', 'gzip')])
self.assertNoHeader('Content-Encoding')
self.assertStatus(500)
self.assertErrorPage(500, pattern='IndexError\n')
# In this case, there's nothing we can do to deliver a
# readable page, since 1) the gzip header is already set,
# and 2) we may have already written some of the body.
# The fix is to never stream yields when using gzip.
if cherrypy.server.protocol_version == 'HTTP/1.0' or getattr(
cherrypy.server,
'using_apache',
False,
):
self.getPage(
'/gzip/noshow_stream',
headers=[('Accept-Encoding', 'gzip')],
)
self.assertHeader('Content-Encoding', 'gzip')
self.assertInBody('\x1f\x8b\x08\x00')
else:
# The wsgiserver will simply stop sending data, and the HTTP client
# will error due to an incomplete chunk-encoded stream.
self.assertRaises(
(ValueError, IncompleteRead),
self.getPage,
'/gzip/noshow_stream',
headers=[('Accept-Encoding', 'gzip')],
)
def test_UnicodeHeaders(self):
self.getPage('/cookies_and_headers')
self.assertBody('Any content')
def test_BytesHeaders(self):
self.getPage('/cookies_and_headers')
self.assertBody('Any content')
self.assertHeader('Bytes-Header', 'Bytes given header')
| EncodingTests |
python | django__django | tests/admin_widgets/models.py | {
"start": 5045,
"end": 5350
} | class ____(models.Model):
name = models.CharField(max_length=255)
if Image:
photo = models.ImageField(
storage=temp_storage, upload_to="photos", blank=True, null=True
)
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
| Student |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 347598,
"end": 348728
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"encoded_name",
"encoding",
"extension",
"is_image",
"is_truncated",
"language",
"name",
"size",
"text",
)
encoded_name = sgqlc.types.Field(String, graphql_name="encodedName")
encoding = sgqlc.types.Field(String, graphql_name="encoding")
extension = sgqlc.types.Field(String, graphql_name="extension")
is_image = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isImage")
is_truncated = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isTruncated"
)
language = sgqlc.types.Field("Language", graphql_name="language")
name = sgqlc.types.Field(String, graphql_name="name")
size = sgqlc.types.Field(Int, graphql_name="size")
text = sgqlc.types.Field(
String,
graphql_name="text",
args=sgqlc.types.ArgDict(
(("truncate", sgqlc.types.Arg(Int, graphql_name="truncate", default=None)),)
),
)
| GistFile |
python | facebook__pyre-check | tools/upgrade/commands/codemods.py | {
"start": 2929,
"end": 4920
} | class ____(Command):
def __init__(self, *, repository: Repository, only_fix_error_code: int) -> None:
super().__init__(repository)
self._only_fix_error_code: int = only_fix_error_code
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "MissingGlobalAnnotations":
return MissingGlobalAnnotations(
repository=repository, only_fix_error_code=arguments.only_fix_error_code
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(MissingGlobalAnnotations, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"--only-fix-error-code",
type=int,
help="Only add fixmes for errors with this specific error code.",
default=None,
)
@override
def run(self) -> None:
errors = Errors.from_stdin(self._only_fix_error_code)
for path, errors_for_path in errors.paths_to_errors.items():
LOG.info("Patching errors in `%s`", path)
errors_for_path = sorted(
errors_for_path, key=lambda error: error["line"], reverse=True
)
path = pathlib.Path(path)
lines = path.read_text().split("\n")
for error in errors_for_path:
if error["code"] != 5:
continue
line = error["line"] - 1
match = re.match(r".*`.*`.*`(.*)`.*", error["description"])
if not match:
continue
annotation = match.groups()[0]
LOG.info("Looking at %d: %s", line, lines[line])
if " =" in lines[line]:
lines[line] = lines[line].replace(" =", ": %s =" % annotation)
LOG.info("%d: %s", line, lines[line])
path.write_text("\n".join(lines))
| MissingGlobalAnnotations |
python | kamyu104__LeetCode-Solutions | Python/kth-missing-positive-number.py | {
"start": 32,
"end": 567
} | class ____(object):
def findKthPositive(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: int
"""
def check(arr, k, x):
return arr[x]-(x+1) < k
left, right = 0, len(arr)-1
while left <= right:
mid = left + (right-left)//2
if not check(arr, k, mid):
right = mid-1
else:
left = mid+1
return right+1+k # arr[right] + (k-(arr[right]-(right+1))) if right >= 0 else k
| Solution |
python | streamlit__streamlit | lib/tests/streamlit/elements/file_uploader_test.py | {
"start": 1276,
"end": 15697
} | class ____(DeltaGeneratorTestCase):
def test_just_label(self):
"""Test that it can be called with no other values."""
st.file_uploader("the label")
c = self.get_delta_from_queue().new_element.file_uploader
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.file_uploader("the label", disabled=True)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.disabled
def test_single_type(self):
"""Test that it can be called using a string for type parameter."""
st.file_uploader("the label", type="png")
c = self.get_delta_from_queue().new_element.file_uploader
assert c.type == [".png"]
def test_multiple_types(self):
"""Test that it can be called using an array for type parameter."""
st.file_uploader("the label", type=["png", ".svg", "foo"])
c = self.get_delta_from_queue().new_element.file_uploader
assert c.type == [".png", ".svg", ".foo"]
def test_jpg_expansion(self):
"""Test that it adds jpg when passing in just jpeg (and vice versa)."""
st.file_uploader("the label", type=["png", ".jpg"])
c = self.get_delta_from_queue().new_element.file_uploader
assert c.type == [".png", ".jpg", ".jpeg"]
st.file_uploader("the label", type=["png", ".jpeg"])
c = self.get_delta_from_queue().new_element.file_uploader
assert c.type == [".png", ".jpeg", ".jpg"]
def test_uppercase_expansion(self):
"""Test that it can expand jpg to jpeg even when uppercase."""
st.file_uploader("the label", type=["png", ".JpG"])
c = self.get_delta_from_queue().new_element.file_uploader
assert c.type == [".png", ".jpg", ".jpeg"]
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_not_allowed_file_extension_raise_an_exception(
self, get_upload_files_patch
):
rec1 = UploadedFileRec("file1", "file1.pdf", "type", b"123")
uploaded_files = [
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
]
get_upload_files_patch.return_value = uploaded_files
with pytest.raises(StreamlitAPIException) as e:
return_val = st.file_uploader(
"label",
type="png",
)
st.write(return_val)
assert str(e.value) == "Invalid file extension: `.pdf`. Allowed: ['.png']"
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_multiple_files(self, get_upload_files_patch):
"""Test the accept_multiple_files flag"""
# Patch UploadFileManager to return two files
rec1 = UploadedFileRec("file1", "file1.png", "type", b"123")
rec2 = UploadedFileRec("file2", "file2.png", "type", b"456")
uploaded_files = [
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
UploadedFile(
rec2, FileURLsProto(file_id="file2", delete_url="d1", upload_url="u1")
),
]
get_upload_files_patch.return_value = uploaded_files
for accept_multiple in [True, False]:
return_val = st.file_uploader(
"label", type="png", accept_multiple_files=accept_multiple
)
c = self.get_delta_from_queue().new_element.file_uploader
assert accept_multiple == c.multiple_files
# If "accept_multiple_files" is True, then we should get a list of
# values back. Otherwise, we should just get a single value.
if accept_multiple:
assert return_val == uploaded_files
for actual, expected in zip(return_val, uploaded_files, strict=False):
assert actual.name == expected.name
assert actual.type == expected.type
assert actual.size == expected.size
assert actual.getvalue() == expected.getvalue()
else:
first_uploaded_file = uploaded_files[0]
assert return_val == first_uploaded_file
assert return_val.name == first_uploaded_file.name
assert return_val.type == first_uploaded_file.type
assert return_val.size == first_uploaded_file.size
assert return_val.getvalue() == first_uploaded_file.getvalue()
def test_max_upload_size_mb(self):
"""Test that the max upload size is the configuration value."""
st.file_uploader("the label")
c = self.get_delta_from_queue().new_element.file_uploader
assert c.max_upload_size_mb == config.get_option("server.maxUploadSize")
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_unique_uploaded_file_instance(self, get_upload_files_patch):
"""We should get a unique UploadedFile instance each time we access
the file_uploader widget."""
# Patch UploadFileManager to return two files
rec1 = UploadedFileRec("file1", "file1", "type", b"123")
rec2 = UploadedFileRec("file2", "file2", "type", b"456")
uploaded_files = [
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
UploadedFile(
rec2, FileURLsProto(file_id="file2", delete_url="d1", upload_url="u1")
),
]
get_upload_files_patch.return_value = uploaded_files
# These file_uploaders have different labels so that we don't cause
# a DuplicateKey error - but because we're patching the get_files
# function, both file_uploaders will refer to the same files.
file1: UploadedFile = st.file_uploader("a", accept_multiple_files=False)
file2: UploadedFile = st.file_uploader("b", accept_multiple_files=False)
assert id(file1) != id(file2)
# Seeking in one instance should not impact the position in the other.
file1.seek(2)
assert file1.read() == b"3"
assert file2.read() == b"123"
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_deleted_files_filtered_out(self, get_upload_files_patch):
"""We should filter out DeletedFile objects for final user value."""
rec1 = UploadedFileRec("file1", "file1", "type", b"1234")
rec2 = UploadedFileRec("file2", "file2", "type", b"5678")
uploaded_files = [
DeletedFile(file_id="a"),
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
DeletedFile(file_id="b"),
UploadedFile(
rec2, FileURLsProto(file_id="file2", delete_url="d1", upload_url="u1")
),
DeletedFile(file_id="c"),
]
get_upload_files_patch.return_value = uploaded_files
result_1: UploadedFile = st.file_uploader("a", accept_multiple_files=False)
result_2: UploadedFile = st.file_uploader("b", accept_multiple_files=True)
assert result_1 is None
assert result_2 == [uploaded_files[1], uploaded_files[3]]
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility parameter."""
st.file_uploader("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.file_uploader("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.file_uploader("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_directory_upload(self, get_upload_files_patch):
"""Test directory upload functionality"""
# Mock directory upload with multiple files
rec1 = UploadedFileRec(
"file1", "project/main.py", "text/plain", b"print('hello')"
)
rec2 = UploadedFileRec(
"file2", "project/utils.py", "text/plain", b"def helper(): pass"
)
rec3 = UploadedFileRec(
"file3", "project/tests/test_main.py", "text/plain", b"def test(): pass"
)
uploaded_files = [
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
UploadedFile(
rec2, FileURLsProto(file_id="file2", delete_url="d2", upload_url="u2")
),
UploadedFile(
rec3, FileURLsProto(file_id="file3", delete_url="d3", upload_url="u3")
),
]
get_upload_files_patch.return_value = uploaded_files
# Test directory upload
return_val = st.file_uploader(
"Upload directory", type=[".py"], accept_multiple_files="directory"
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
# Directory uploads always return a list
assert return_val == uploaded_files
assert len(return_val) == 3
for actual, expected in zip(return_val, uploaded_files, strict=False):
assert actual.name == expected.name
assert actual.type == expected.type
assert actual.size == expected.size
assert actual.getvalue() == expected.getvalue()
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_directory_upload_with_file_filtering(self, get_upload_files_patch):
"""Test that directory upload respects file type restrictions"""
# Mock mixed file types in directory - only .txt should be included
rec1 = UploadedFileRec(
"file1", "docs/readme.txt", "text/plain", b"readme content"
)
rec2 = UploadedFileRec(
"file2", "docs/notes.txt", "text/plain", b"notes content"
)
# These would be filtered out by file type restrictions
uploaded_files = [
UploadedFile(
rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1")
),
UploadedFile(
rec2, FileURLsProto(file_id="file2", delete_url="d2", upload_url="u2")
),
]
get_upload_files_patch.return_value = uploaded_files
return_val = st.file_uploader(
"Upload text files only", type=["txt"], accept_multiple_files="directory"
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
assert c.type == [".txt"]
# Should only return .txt files
assert len(return_val) == 2
for file in return_val:
assert file.name.endswith(".txt")
@patch("streamlit.elements.widgets.file_uploader._get_upload_files")
def test_directory_upload_empty(self, get_upload_files_patch):
"""Test directory upload with no files"""
get_upload_files_patch.return_value = []
return_val = st.file_uploader(
"Upload empty directory", accept_multiple_files="directory"
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
# Empty directory should return empty list
assert return_val == []
def test_directory_upload_proto_values(self):
"""Test that directory upload sets correct proto values"""
st.file_uploader("Directory uploader", accept_multiple_files="directory")
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
def test_directory_upload_with_width(self):
"""Test directory upload with width parameter"""
st.file_uploader(
"Directory with width", accept_multiple_files="directory", width=300
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
def test_directory_upload_disabled(self):
"""Test disabled directory upload"""
st.file_uploader(
"Disabled directory", accept_multiple_files="directory", disabled=True
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
assert c.disabled is True
def test_directory_upload_with_help(self):
"""Test directory upload with help text"""
help_text = "Upload a directory containing your project files"
st.file_uploader(
"Project directory", accept_multiple_files="directory", help=help_text
)
c = self.get_delta_from_queue().new_element.file_uploader
assert c.multiple_files is True
assert c.accept_directory is True
assert c.help == help_text
| FileUploaderTest |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py | {
"start": 39082,
"end": 40430
} | class ____(torch.autograd.Function):
@staticmethod
def _assert_not_tracing_fsdp():
if compiled_autograd_enabled():
# TODO: Find a way to print the offending FSDP2 module.
msg = """\
When Traceable FSDP2 is enabled, we should not be calling into `RegisterPostBackwardFunction`.
Instead, we rely on the param group's next `pre_backward` hook to trigger its previously unexecuted
`post_backward`, and we rely on FSDPState's `root_post_backward_callback` to trigger the resharding
of any leftover unsharded param groups.
If you are here, it means the forward part of this FSDP2 instance is not compiled, and you must also
compile the forward part if you want to use Traceable FSDP2."""
torch._dynamo.comptime.comptime.print(msg)
raise RuntimeError(msg)
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor):
# All tensors in `inputs` should require gradient
RegisterPostBackwardFunction._assert_not_tracing_fsdp()
ctx.param_group = param_group
return inputs
@staticmethod
def backward(ctx, *grads: torch.Tensor):
RegisterPostBackwardFunction._assert_not_tracing_fsdp()
ctx.param_group.post_backward()
return (None,) + grads
| RegisterPostBackwardFunction |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 5449,
"end": 6147
} | class ____(nn.Module):
r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside.
Used to test pruned Conv2d-Bias-Conv2d fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=True),
nn.Conv2d(32, 32, 3, 1, bias=True),
nn.Conv2d(32, 64, 3, 1, bias=False),
)
self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True)
self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = self.conv2d2(x)
return x
| Conv2dBias |
python | google__pytype | pytype/metrics.py | {
"start": 5624,
"end": 6077
} | class ____(Metric):
"""A counter that measures the time spent in a "with" statement."""
def __enter__(self):
self._start_time = get_cpu_clock()
def __exit__(self, exc_type, exc_value, traceback):
self._total = get_cpu_clock() - self._start_time
del self._start_time
def _summary(self):
return f"{self._total:f} seconds"
def _merge(self, other):
# pylint: disable=protected-access
self._total += other._total
| StopWatch |
python | numpy__numpy | numpy/distutils/command/install_data.py | {
"start": 269,
"end": 848
} | class ____ (old_install_data):
def run(self):
old_install_data.run(self)
if have_setuptools:
# Run install_clib again, since setuptools does not run sub-commands
# of install automatically
self.run_command('install_clib')
def finalize_options (self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
('root', 'root'),
('force', 'force'),
)
| install_data |
python | kamyu104__LeetCode-Solutions | Python/can-place-flowers.py | {
"start": 29,
"end": 508
} | class ____(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
for i in xrange(len(flowerbed)):
if flowerbed[i] == 0 and (i == 0 or flowerbed[i-1] == 0) and \
(i == len(flowerbed)-1 or flowerbed[i+1] == 0):
flowerbed[i] = 1
n -= 1
if n <= 0:
return True
return False
| Solution |
python | ray-project__ray | python/ray/data/_internal/datasource/clickhouse_datasink.py | {
"start": 2788,
"end": 3415
} | class ____(IntEnum):
"""
Enum of possible modes for sinking data
Attributes:
CREATE: Create a new table; fail if that table already exists.
APPEND: Use an existing table if present, otherwise create one; then append data.
OVERWRITE: Drop the table if it already exists, then re-create it and write.
"""
# Create a new table and fail if that table already exists.
CREATE = 1
# Append data to an existing table, or create one if it does not exist.
APPEND = 2
# Drop the table if it already exists, then re-create it and write.
OVERWRITE = 3
@DeveloperAPI
| SinkMode |
python | tiangolo__fastapi | scripts/notify_translations.py | {
"start": 3616,
"end": 12994
} | class ____(BaseModel):
pull_request: PartialGitHubEventIssue | None = None
def get_graphql_response(
*,
settings: Settings,
query: str,
after: Union[str, None] = None,
category_id: Union[str, None] = None,
discussion_number: Union[int, None] = None,
discussion_id: Union[str, None] = None,
comment_id: Union[str, None] = None,
body: Union[str, None] = None,
) -> Dict[str, Any]:
headers = {"Authorization": f"token {settings.github_token.get_secret_value()}"}
variables = {
"after": after,
"category_id": category_id,
"discussion_number": discussion_number,
"discussion_id": discussion_id,
"comment_id": comment_id,
"body": body,
}
response = httpx.post(
github_graphql_url,
headers=headers,
timeout=settings.httpx_timeout,
json={"query": query, "variables": variables, "operationName": "Q"},
)
if response.status_code != 200:
logging.error(
f"Response was not 200, after: {after}, category_id: {category_id}"
)
logging.error(response.text)
raise RuntimeError(response.text)
data = response.json()
if "errors" in data:
logging.error(f"Errors in response, after: {after}, category_id: {category_id}")
logging.error(data["errors"])
logging.error(response.text)
raise RuntimeError(response.text)
return cast(Dict[str, Any], data)
def get_graphql_translation_discussions(
*, settings: Settings
) -> List[AllDiscussionsDiscussionNode]:
data = get_graphql_response(
settings=settings,
query=all_discussions_query,
category_id=questions_translations_category_id,
)
graphql_response = AllDiscussionsResponse.model_validate(data)
return graphql_response.data.repository.discussions.nodes
def get_graphql_translation_discussion_comments_edges(
*, settings: Settings, discussion_number: int, after: Union[str, None] = None
) -> List[CommentsEdge]:
data = get_graphql_response(
settings=settings,
query=translation_discussion_query,
discussion_number=discussion_number,
after=after,
)
graphql_response = CommentsResponse.model_validate(data)
return graphql_response.data.repository.discussion.comments.edges
def get_graphql_translation_discussion_comments(
*, settings: Settings, discussion_number: int
) -> list[Comment]:
comment_nodes: List[Comment] = []
discussion_edges = get_graphql_translation_discussion_comments_edges(
settings=settings, discussion_number=discussion_number
)
while discussion_edges:
for discussion_edge in discussion_edges:
comment_nodes.append(discussion_edge.node)
last_edge = discussion_edges[-1]
discussion_edges = get_graphql_translation_discussion_comments_edges(
settings=settings,
discussion_number=discussion_number,
after=last_edge.cursor,
)
return comment_nodes
def create_comment(*, settings: Settings, discussion_id: str, body: str) -> Comment:
data = get_graphql_response(
settings=settings,
query=add_comment_mutation,
discussion_id=discussion_id,
body=body,
)
response = AddCommentResponse.model_validate(data)
return response.data.addDiscussionComment.comment
def update_comment(*, settings: Settings, comment_id: str, body: str) -> Comment:
data = get_graphql_response(
settings=settings,
query=update_comment_mutation,
comment_id=comment_id,
body=body,
)
response = UpdateCommentResponse.model_validate(data)
return response.data.updateDiscussionComment.comment
def main() -> None:
settings = Settings()
if settings.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.debug(f"Using config: {settings.model_dump_json()}")
g = Github(settings.github_token.get_secret_value())
repo = g.get_repo(settings.github_repository)
if not settings.github_event_path.is_file():
raise RuntimeError(
f"No github event file available at: {settings.github_event_path}"
)
contents = settings.github_event_path.read_text()
github_event = PartialGitHubEvent.model_validate_json(contents)
logging.info(f"Using GitHub event: {github_event}")
number = (
github_event.pull_request and github_event.pull_request.number
) or settings.number
if number is None:
raise RuntimeError("No PR number available")
# Avoid race conditions with multiple labels
sleep_time = random.random() * 10 # random number between 0 and 10 seconds
logging.info(
f"Sleeping for {sleep_time} seconds to avoid "
"race conditions and multiple comments"
)
time.sleep(sleep_time)
# Get PR
logging.debug(f"Processing PR: #{number}")
pr = repo.get_pull(number)
label_strs = {label.name for label in pr.get_labels()}
langs = []
for label in label_strs:
if label.startswith("lang-") and not label == lang_all_label:
langs.append(label[5:])
logging.info(f"PR #{pr.number} has labels: {label_strs}")
if not langs or lang_all_label not in label_strs:
logging.info(f"PR #{pr.number} doesn't seem to be a translation PR, skipping")
sys.exit(0)
# Generate translation map, lang ID to discussion
discussions = get_graphql_translation_discussions(settings=settings)
lang_to_discussion_map: Dict[str, AllDiscussionsDiscussionNode] = {}
for discussion in discussions:
for edge in discussion.labels.edges:
label = edge.node.name
if label.startswith("lang-") and not label == lang_all_label:
lang = label[5:]
lang_to_discussion_map[lang] = discussion
logging.debug(f"Using translations map: {lang_to_discussion_map}")
# Messages to create or check
new_translation_message = f"Good news everyone! 😉 There's a new translation PR to be reviewed: #{pr.number} by @{pr.user.login}. 🎉 This requires 2 approvals from native speakers to be merged. 🤓"
done_translation_message = f"~There's a new translation PR to be reviewed: #{pr.number} by @{pr.user.login}~ Good job! This is done. 🍰☕"
# Normally only one language, but still
for lang in langs:
if lang not in lang_to_discussion_map:
log_message = f"Could not find discussion for language: {lang}"
logging.error(log_message)
raise RuntimeError(log_message)
discussion = lang_to_discussion_map[lang]
logging.info(
f"Found a translation discussion for language: {lang} in discussion: #{discussion.number}"
)
already_notified_comment: Union[Comment, None] = None
already_done_comment: Union[Comment, None] = None
logging.info(
f"Checking current comments in discussion: #{discussion.number} to see if already notified about this PR: #{pr.number}"
)
comments = get_graphql_translation_discussion_comments(
settings=settings, discussion_number=discussion.number
)
for comment in comments:
if new_translation_message in comment.body:
already_notified_comment = comment
elif done_translation_message in comment.body:
already_done_comment = comment
logging.info(
f"Already notified comment: {already_notified_comment}, already done comment: {already_done_comment}"
)
if pr.state == "open" and awaiting_label in label_strs:
logging.info(
f"This PR seems to be a language translation and awaiting reviews: #{pr.number}"
)
if already_notified_comment:
logging.info(
f"This PR #{pr.number} was already notified in comment: {already_notified_comment.url}"
)
else:
logging.info(
f"Writing notification comment about PR #{pr.number} in Discussion: #{discussion.number}"
)
comment = create_comment(
settings=settings,
discussion_id=discussion.id,
body=new_translation_message,
)
logging.info(f"Notified in comment: {comment.url}")
elif pr.state == "closed" or approved_label in label_strs:
logging.info(f"Already approved or closed PR #{pr.number}")
if already_done_comment:
logging.info(
f"This PR #{pr.number} was already marked as done in comment: {already_done_comment.url}"
)
elif already_notified_comment:
updated_comment = update_comment(
settings=settings,
comment_id=already_notified_comment.id,
body=done_translation_message,
)
logging.info(f"Marked as done in comment: {updated_comment.url}")
else:
logging.info(
f"There doesn't seem to be anything to be done about PR #{pr.number}"
)
logging.info("Finished")
if __name__ == "__main__":
main()
| PartialGitHubEvent |
python | doocs__leetcode | solution/2800-2899/2832.Maximal Range That Each Element Is Maximum in It/Solution.py | {
"start": 0,
"end": 630
} | class ____:
def maximumLengthOfRanges(self, nums: List[int]) -> List[int]:
n = len(nums)
left = [-1] * n
right = [n] * n
stk = []
for i, x in enumerate(nums):
while stk and nums[stk[-1]] <= x:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and nums[stk[-1]] <= nums[i]:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
return [r - l - 1 for l, r in zip(left, right)]
| Solution |
python | django__django | django/contrib/auth/models.py | {
"start": 17855,
"end": 18126
} | class ____(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username and password are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = "AUTH_USER_MODEL"
| User |
python | walkccc__LeetCode | solutions/2662. Minimum Cost of a Path With Special Roads/2662.py | {
"start": 0,
"end": 1478
} | class ____:
def minimumCost(
self,
start: list[int],
target: list[int],
specialRoads: list[list[int]],
) -> int:
return self.dijkstra(specialRoads, *start, *target)
def dijkstra(
self,
specialRoads: list[list[int]],
srcX: int,
srcY: int,
dstX: int,
dstY: int,
) -> int:
n = len(specialRoads)
# dist[i] := the minimum distance of (srcX, srcY) to specialRoads[i](x2, y2)
dist = [math.inf] * n
minHeap = [] # (d, u), where u := the i-th specialRoads
# (srcX, srcY) -> (x1, y1) to cost -> (x2, y2)
for u, (x1, y1, _, _, cost) in enumerate(specialRoads):
d = abs(x1 - srcX) + abs(y1 - srcY) + cost
dist[u] = d
heapq.heappush(minHeap, (dist[u], u))
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
_, _, ux2, uy2, _ = specialRoads[u]
for v in range(n):
if v == u:
continue
vx1, vy1, _, _, vcost = specialRoads[v]
# (ux2, uy2) -> (vx1, vy1) to vcost -> (vx2, vy2)
newDist = d + abs(vx1 - ux2) + abs(vy1 - uy2) + vcost
if newDist < dist[v]:
dist[v] = newDist
heapq.heappush(minHeap, (dist[v], v))
ans = abs(dstX - srcX) + abs(dstY - srcY)
for u in range(n):
_, _, x2, y2, _ = specialRoads[u]
# (srcX, srcY) -> (x2, y2) -> (dstX, dstY).
ans = min(ans, dist[u] + abs(dstX - x2) + abs(dstY - y2))
return ans
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 5770,
"end": 9486
} | class ____:
pass
@pytest.mark.parametrize(
"typ,coll_type",
[
(typing.ChainMap[Elem, ElemValue], typing.ChainMap),
(typing.DefaultDict[Elem, ElemValue], typing.DefaultDict),
(typing.OrderedDict[Elem, ElemValue], typing.OrderedDict),
],
ids=repr,
)
@given(data=st.data())
def test_specialised_mapping_types(data, typ, coll_type):
ex = data.draw(from_type(typ).filter(len))
assert isinstance(ex, coll_type)
instances = [isinstance(elem, Elem) for elem in ex]
assert all(instances)
assert all(isinstance(elem, ElemValue) for elem in ex.values())
@given(from_type(typing.ItemsView[Elem, Elem]).filter(len))
def test_ItemsView(ex):
# See https://github.com/python/typing/issues/177
assert isinstance(ex, type({}.items()))
assert all(isinstance(elem, tuple) and len(elem) == 2 for elem in ex)
assert all(all(isinstance(e, Elem) for e in elem) for elem in ex)
@pytest.mark.parametrize("generic", [typing.Match, typing.Pattern])
@pytest.mark.parametrize("typ", [bytes, str])
@given(data=st.data())
def test_regex_types(data, generic, typ):
x = data.draw(from_type(generic[typ]))
assert isinstance(x[0] if generic is typing.Match else x.pattern, typ)
@given(x=...)
def test_Generator(x: typing.Generator[Elem, None, ElemValue]):
assert isinstance(x, typing.Generator)
try:
while True:
e = next(x)
assert isinstance(e, Elem)
x.send(None) # The generators we create don't check the send type
except StopIteration as stop:
assert isinstance(stop.value, ElemValue)
def test_Optional_minimises_to_None():
assert minimal(from_type(typing.Optional[int]), lambda ex: True) is None
@pytest.mark.parametrize("n", [0, 1, 5])
@pytest.mark.parametrize("t", [tuple, _Tuple])
def test_variable_length_tuples(t, n):
type_ = t[int, ...]
check_can_generate_examples(from_type(type_).filter(lambda ex: len(ex) == n))
def test_lookup_overrides_defaults():
sentinel = object()
with temp_registered(int, st.just(sentinel)):
@given(from_type(list[int]))
def inner_1(ex):
assert all(elem is sentinel for elem in ex)
inner_1()
@given(from_type(list[int]))
def inner_2(ex):
assert all(isinstance(elem, int) for elem in ex)
inner_2()
def test_register_generic_typing_strats():
# I don't expect anyone to do this, but good to check it works as expected
with temp_registered(
typing.Sequence,
types._global_type_lookup[set],
):
# We register sets for the abstract sequence type, which masks subtypes
# from supertype resolution but not direct resolution
assert_all_examples(
from_type(typing.Sequence[int]), lambda ex: isinstance(ex, set)
)
assert_all_examples(
from_type(typing.Container[int]),
lambda ex: not isinstance(ex, typing.Sequence),
)
assert_all_examples(from_type(list[int]), lambda ex: isinstance(ex, list))
def if_available(name):
try:
return getattr(typing, name)
except AttributeError:
return pytest.param(name, marks=[pytest.mark.skip])
@pytest.mark.parametrize(
"typ",
[
typing.Sequence,
typing.Container,
typing.Mapping,
typing.Reversible,
typing.SupportsBytes,
typing.SupportsAbs,
typing.SupportsComplex,
typing.SupportsFloat,
typing.SupportsInt,
typing.SupportsRound,
if_available("SupportsIndex"),
],
ids=get_pretty_function_description,
)
def test_resolves_weird_types(typ):
check_can_generate_examples(from_type(typ))
| ElemValue |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 91604,
"end": 91848
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling")
| AutoModelForMaskedImageModeling |
python | sphinx-doc__sphinx | sphinx/transforms/post_transforms/__init__.py | {
"start": 13673,
"end": 14420
} | class ____(SphinxPostTransform):
"""Add the domain name of the parent node as a class in each desc_signature node."""
default_priority = 200
def run(self, **kwargs: Any) -> None:
for node in self.document.findall(addnodes.desc_signature):
if node.parent.get('domain'):
node['classes'].append(node.parent['domain'])
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_post_transform(ReferencesResolver)
app.add_post_transform(OnlyNodeTransform)
app.add_post_transform(SigElementFallbackTransform)
app.add_post_transform(PropagateDescDomain)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| PropagateDescDomain |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 54447,
"end": 56114
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ClapTextLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
| ClapTextEncoder |
python | pydantic__pydantic | pydantic-core/tests/test_tzinfo.py | {
"start": 741,
"end": 1358
} | class ____:
"""
Object that is less than anything (except itself).
"""
def __eq__(self, other):
return isinstance(other, _SMALLEST)
def __gt__(self, other):
return False
SMALLEST = _SMALLEST()
pickle_choices = [(pickle, pickle, proto) for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
HOUR = timedelta(hours=1).total_seconds()
ZERO = timedelta(0).total_seconds()
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
DSTSTART = datetime(1, 4, 1, 2)
DSTEND = datetime(1, 10, 25, 1)
| _SMALLEST |
python | walkccc__LeetCode | solutions/3464. Maximize the Distance Between Points on a Square/3464.py | {
"start": 60,
"end": 273
} | class ____:
startX: int
startY: int
endX: int
endY: int
length: int
def __iter__(self):
yield self.startX
yield self.startY
yield self.endX
yield self.endY
yield self.length
| Sequence |
python | gevent__gevent | src/gevent/queue.py | {
"start": 22657,
"end": 23379
} | class ____(Queue):
'''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: ``(priority number, data)``.
.. versionchanged:: 1.2a1
Any *items* given to the constructor will now be passed through
:func:`heapq.heapify` to ensure the invariants of this class hold.
Previously it was just assumed that they were already a heap.
'''
__slots__ = ()
def _create_queue(self, items=()):
q = list(items)
_heapify(q)
return q
def _put(self, item):
_heappush(self.queue, item)
self._did_put_task()
def _get(self):
return _heappop(self.queue)
| PriorityQueue |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/keyword_table/base.py | {
"start": 1392,
"end": 8264
} | class ____(BaseIndex[KeywordTable]):
"""
Base Keyword Table Index.
This index extracts keywords from the text, and maps each
keyword to the node(s) that it corresponds to. In this sense it mimics a
"hash table". During index construction, the keyword table is constructed
by extracting keywords from each node and creating an internal mapping.
During query time, the keywords are extracted from the query text, and these
keywords are used to index into the keyword table. The retrieved nodes
are then used to answer the query.
Args:
keyword_extract_template (Optional[BasePromptTemplate]): A Keyword
Extraction Prompt
(see :ref:`Prompt-Templates`).
use_async (bool): Whether to use asynchronous calls. Defaults to False.
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
"""
index_struct_cls = KeywordTable
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[KeywordTable] = None,
llm: Optional[LLM] = None,
keyword_extract_template: Optional[BasePromptTemplate] = None,
max_keywords_per_chunk: int = 10,
use_async: bool = False,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self._llm = llm or Settings.llm
self.max_keywords_per_chunk = max_keywords_per_chunk
self.keyword_extract_template = (
keyword_extract_template or DEFAULT_KEYWORD_EXTRACT_TEMPLATE
)
# NOTE: Partially format keyword extract template here.
self.keyword_extract_template = self.keyword_extract_template.partial_format(
max_keywords=self.max_keywords_per_chunk
)
self._use_async = use_async
super().__init__(
nodes=nodes,
index_struct=index_struct,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[
str, KeywordTableRetrieverMode
] = KeywordTableRetrieverMode.DEFAULT,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.keyword_table.retrievers import (
KeywordTableGPTRetriever,
KeywordTableRAKERetriever,
KeywordTableSimpleRetriever,
)
if retriever_mode == KeywordTableRetrieverMode.DEFAULT:
return KeywordTableGPTRetriever(
self, object_map=self._object_map, llm=self._llm, **kwargs
)
elif retriever_mode == KeywordTableRetrieverMode.SIMPLE:
return KeywordTableSimpleRetriever(
self, object_map=self._object_map, **kwargs
)
elif retriever_mode == KeywordTableRetrieverMode.RAKE:
return KeywordTableRAKERetriever(
self, object_map=self._object_map, **kwargs
)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
@abstractmethod
def _extract_keywords(self, text: str) -> Set[str]:
"""Extract keywords from text."""
async def _async_extract_keywords(self, text: str) -> Set[str]:
"""Extract keywords from text."""
# by default just call sync version
return self._extract_keywords(text)
def _add_nodes_to_index(
self,
index_struct: KeywordTable,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> None:
"""Add document to index."""
nodes_with_progress = get_tqdm_iterable(
nodes, show_progress, "Extracting keywords from nodes"
)
for n in nodes_with_progress:
keywords = self._extract_keywords(
n.get_content(metadata_mode=MetadataMode.LLM)
)
index_struct.add_node(list(keywords), n)
async def _async_add_nodes_to_index(
self,
index_struct: KeywordTable,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> None:
"""Add document to index."""
nodes_with_progress = get_tqdm_iterable(
nodes, show_progress, "Extracting keywords from nodes"
)
for n in nodes_with_progress:
keywords = await self._async_extract_keywords(
n.get_content(metadata_mode=MetadataMode.LLM)
)
index_struct.add_node(list(keywords), n)
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> KeywordTable:
"""Build the index from nodes."""
# do simple concatenation
index_struct = KeywordTable(table={})
if self._use_async:
tasks = [
self._async_add_nodes_to_index(index_struct, nodes, self._show_progress)
]
run_async_tasks(tasks)
else:
self._add_nodes_to_index(index_struct, nodes, self._show_progress)
return index_struct
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert nodes."""
for n in nodes:
keywords = self._extract_keywords(
n.get_content(metadata_mode=MetadataMode.LLM)
)
self._index_struct.add_node(list(keywords), n)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
# delete node from the keyword table
keywords_to_delete = set()
for keyword, existing_node_ids in self._index_struct.table.items():
if node_id in existing_node_ids:
existing_node_ids.remove(node_id)
if len(existing_node_ids) == 0:
keywords_to_delete.add(keyword)
# delete keywords that have zero nodes
for keyword in keywords_to_delete:
del self._index_struct.table[keyword]
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids_sets = list(self._index_struct.table.values())
node_doc_ids = list(set().union(*node_doc_ids_sets))
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
| BaseKeywordTableIndex |
python | gevent__gevent | src/gevent/tests/test__server.py | {
"start": 3034,
"end": 10605
} | class ____(greentest.TestCase):
# pylint: disable=too-many-public-methods
__timeout__ = greentest.LARGE_TIMEOUT
Settings = Settings
server = None
def cleanup(self):
if getattr(self, 'server', None) is not None:
self.server.stop()
self.server = None
sleep_to_clear_old_sockets()
def get_listener(self):
return self._close_on_teardown(tcp_listener(backlog=5))
def get_server_host_port_family(self):
server_host = self.server.server_host
if not server_host:
server_host = greentest.DEFAULT_LOCAL_HOST_ADDR
elif server_host == '::':
server_host = greentest.DEFAULT_LOCAL_HOST_ADDR6
try:
family = self.server.socket.family
except AttributeError:
# server deletes socket when closed
family = socket.AF_INET
return server_host, self.server.server_port, family
@contextmanager
def makefile(self, timeout=_DEFAULT_SOCKET_TIMEOUT, bufsize=1, include_raw_socket=False):
server_host, server_port, family = self.get_server_host_port_family()
bufarg = 'buffering' if PY3 else 'bufsize'
makefile_kwargs = {bufarg: bufsize}
if PY3:
# Under Python3, you can't read and write to the same
# makefile() opened in r, and r+ is not allowed
makefile_kwargs['mode'] = 'rwb'
with socket.socket(family=family) as sock:
rconn = None
# We want the socket to be accessible from the fileobject
# we return. On Python 2, natively this is available as
# _sock, but Python 3 doesn't have that.
sock.connect((server_host, server_port))
sock.settimeout(timeout)
with sock.makefile(**makefile_kwargs) as rconn:
result = rconn if not include_raw_socket else (rconn, sock)
yield result
def send_request(self, url='/', timeout=_DEFAULT_SOCKET_TIMEOUT, bufsize=1):
with self.makefile(timeout=timeout, bufsize=bufsize) as conn:
self.send_request_to_fd(conn, url)
def send_request_to_fd(self, fd, url='/'):
fd.write(('GET %s HTTP/1.0\r\n\r\n' % url).encode('latin-1'))
fd.flush()
LOCAL_CONN_REFUSED_ERRORS = ()
if greentest.OSX:
# A kernel bug in OS X sometimes results in this
LOCAL_CONN_REFUSED_ERRORS = (errno.EPROTOTYPE,)
elif greentest.WIN and greentest.PYPY3:
# We see WinError 10049: The requested address is not valid
# which is not one of the errors we get anywhere else.
# Not sure which errno constant this is?
LOCAL_CONN_REFUSED_ERRORS = (10049,)
elif greentest.RUNNING_ON_MANYLINUX:
# In https://github.com/pypa/manylinux/pull/1785 the manylinux images updated to
# OpenSSL 3.5. While that has been tested to work before without producing this
# error (https://github.com/gevent/gevent/pull/2103), it _is_ doing so in github
# actions.
# XXX: Not sure this is really the right way to handle this.
LOCAL_CONN_REFUSED_ERRORS += (errno.EPIPE,)
def assertConnectionRefused(self, in_proc_server=True):
try:
with self.assertRaises(socket.error) as exc:
with self.makefile() as conn:
conn.close()
except LoopExit:
if not in_proc_server:
raise
# A LoopExit is fine. If we've killed the server
# and don't have any other greenlets to run, then
# blocking to open the connection might raise this.
# This became likely on Windows once we stopped
# passing IP addresses through an extra call to
# ``getaddrinfo``, which changed the number of switches
return
ex = exc.exception
self.assertIn(ex.args[0],
(errno.ECONNREFUSED, errno.EADDRNOTAVAIL,
errno.ECONNRESET, errno.ECONNABORTED) + self.LOCAL_CONN_REFUSED_ERRORS,
(ex, ex.args))
def assert500(self):
self.Settings.assert500(self)
def assert503(self):
self.Settings.assert503(self)
def assertAcceptedConnectionError(self):
self.Settings.assertAcceptedConnectionError(self)
def assertPoolFull(self):
self.Settings.assertPoolFull(self)
def assertNotAccepted(self):
try:
with self.makefile(include_raw_socket=True) as (conn, sock):
conn.write(b'GET / HTTP/1.0\r\n\r\n')
conn.flush()
result = b''
try:
while True:
data = sock.recv(1)
if not data:
break
result += data
except socket.timeout:
self.assertFalse(result)
return
except LoopExit:
# See assertConnectionRefused
return
self.assertTrue(result.startswith(b'HTTP/1.0 500 Internal Server Error'), repr(result))
def assertRequestSucceeded(self, timeout=_DEFAULT_SOCKET_TIMEOUT):
with self.makefile(timeout=timeout) as conn:
conn.write(b'GET /ping HTTP/1.0\r\n\r\n')
result = conn.read()
self.assertTrue(result.endswith(b'\r\n\r\nPONG'), repr(result))
def start_server(self):
self.server.start()
self.assertRequestSucceeded()
self.assertRequestSucceeded()
def stop_server(self):
self.server.stop()
self.assertConnectionRefused()
def report_netstat(self, _msg):
# At one point this would call 'sudo netstat -anp | grep PID'
# with os.system. We can probably do better with psutil.
return
def _create_server(self, *args, **kwargs):
kind = kwargs.pop('server_kind', self.ServerSubClass)
addr = kwargs.pop('server_listen_addr', (greentest.DEFAULT_BIND_ADDR, 0))
return kind(addr, *args, **kwargs)
def init_server(self, *args, **kwargs):
self.server = self._create_server(*args, **kwargs)
self.server.start()
sleep_to_clear_old_sockets()
@property
def socket(self):
return self.server.socket
def _test_invalid_callback(self):
if sysinfo.RUNNING_ON_APPVEYOR:
self.skipTest("Sometimes misses the error") # XXX: Why?
try:
# Can't use a kwarg here, WSGIServer and StreamServer
# take different things (application and handle)
self.init_server(lambda: None)
self.expect_one_error()
self.assert500()
self.assert_error(TypeError)
finally:
self.server.stop()
# XXX: There's something unreachable (with a traceback?)
# We need to clear it to make the leak checks work on Travis;
# so far I can't reproduce it locally on OS X.
import gc; gc.collect()
def fill_default_server_args(self, kwargs):
return self.Settings.fill_default_server_args(self, kwargs)
def ServerClass(self, *args, **kwargs):
return self.Settings.ServerClass(*args,
**self.fill_default_server_args(kwargs))
def ServerSubClass(self, *args, **kwargs):
return self.Settings.ServerSubClass(*args,
**self.fill_default_server_args(kwargs))
def get_spawn(self):
return None
| TestCase |
python | openai__openai-python | src/openai/_response.py | {
"start": 12938,
"end": 16393
} | class ____(BaseAPIResponse[R]):
@property
def request_id(self) -> str | None:
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]
@overload
async def parse(self, *, to: type[_T]) -> _T: ...
@overload
async def parse(self) -> R: ...
async def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
the `to` argument, e.g.
```py
from openai import BaseModel
class MyModel(BaseModel):
foo: str
obj = response.parse(to=MyModel)
print(obj.foo)
```
We support parsing:
- `BaseModel`
- `dict`
- `list`
- `Union`
- `str`
- `httpx.Response`
"""
cache_key = to if to is not None else self._cast_to
cached = self._parsed_by_type.get(cache_key)
if cached is not None:
return cached # type: ignore[no-any-return]
if not self._is_sse_stream:
await self.read()
parsed = self._parse(to=to)
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)
if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)
self._parsed_by_type[cache_key] = parsed
return cast(R, parsed)
async def read(self) -> bytes:
"""Read and return the binary response content."""
try:
return await self.http_response.aread()
except httpx.StreamConsumed as exc:
# the default error raised by httpx isn't very
# helpful in our case so we re-raise it with
# a different error message
raise StreamAlreadyConsumed() from exc
async def text(self) -> str:
"""Read and decode the response content into a string."""
await self.read()
return self.http_response.text
async def json(self) -> object:
"""Read and decode the JSON response content."""
await self.read()
return self.http_response.json()
async def close(self) -> None:
"""Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self.http_response.aclose()
async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
"""
A byte-iterator over the decoded response content.
This automatically handles gzip, deflate and brotli encoded responses.
"""
async for chunk in self.http_response.aiter_bytes(chunk_size):
yield chunk
async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
"""A str-iterator over the decoded response content
that handles both gzip, deflate, etc but also detects the content's
string encoding.
"""
async for chunk in self.http_response.aiter_text(chunk_size):
yield chunk
async def iter_lines(self) -> AsyncIterator[str]:
"""Like `iter_text()` but will only yield chunks for each line"""
async for chunk in self.http_response.aiter_lines():
yield chunk
| AsyncAPIResponse |
python | jmcnamara__XlsxWriter | xlsxwriter/test/contenttypes/test_contenttypes01.py | {
"start": 351,
"end": 2651
} | class ____(unittest.TestCase):
"""
Test assembling a complete ContentTypes file.
"""
def test_assemble_xml_file(self):
"""Test writing an ContentTypes file."""
self.maxDiff = None
fh = StringIO()
content = ContentTypes()
content._set_filehandle(fh)
content._add_worksheet_name("sheet1")
content._add_default(("jpeg", "image/jpeg"))
content._add_shared_strings()
content._add_calc_chain()
content._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default Extension="rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>
<Default Extension="xml" ContentType="application/xml"/>
<Default Extension="jpeg" ContentType="image/jpeg"/>
<Override PartName="/docProps/app.xml" ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"/>
<Override PartName="/docProps/core.xml" ContentType="application/vnd.openxmlformats-package.core-properties+xml"/>
<Override PartName="/xl/styles.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"/>
<Override PartName="/xl/theme/theme1.xml" ContentType="application/vnd.openxmlformats-officedocument.theme+xml"/>
<Override PartName="/xl/workbook.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"/>
<Override PartName="/xl/worksheets/sheet1.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"/>
<Override PartName="/xl/sharedStrings.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"/>
<Override PartName="/xl/calcChain.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml"/>
</Types>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleContentTypes |
python | pypa__pipenv | pipenv/patched/pip/_internal/resolution/resolvelib/requirements.py | {
"start": 1547,
"end": 4142
} | class ____(Requirement):
def __init__(self, ireq: InstallRequirement) -> None:
assert ireq.link is None, "This is a link, not a specifier"
self._ireq = ireq
self._equal_cache: Optional[str] = None
self._hash: Optional[int] = None
self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)
@property
def _equal(self) -> str:
if self._equal_cache is not None:
return self._equal_cache
self._equal_cache = str(self._ireq)
return self._equal_cache
def __str__(self) -> str:
return str(self._ireq.req)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({str(self._ireq.req)!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SpecifierRequirement):
return NotImplemented
return self._equal == other._equal
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
self._hash = hash(self._equal)
return self._hash
@property
def project_name(self) -> NormalizedName:
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
return canonicalize_name(self._ireq.req.name)
@property
def name(self) -> str:
return format_name(self.project_name, self._extras)
def format_for_error(self) -> str:
# Convert comma-separated specifiers into "A, B, ..., F and G"
# This makes the specifier a bit more "human readable", without
# risking a change in meaning. (Hopefully! Not all edge cases have
# been checked)
parts = [s.strip() for s in str(self).split(",")]
if len(parts) == 0:
return ""
elif len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def get_candidate_lookup(self) -> CandidateLookup:
return None, self._ireq
def is_satisfied_by(self, candidate: Candidate) -> bool:
assert candidate.name == self.name, (
f"Internal issue: Candidate is not for this requirement "
f"{candidate.name} vs {self.name}"
)
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
spec = self._ireq.req.specifier
return spec.contains(candidate.version, prereleases=True)
| SpecifierRequirement |
python | ray-project__ray | python/ray/train/_internal/backend_executor.py | {
"start": 29798,
"end": 30697
} | class ____:
# TODO: fix inheritence. perhaps create WorkerGroupInterface.
# Need to define getstate and setstate so that getattr does not screwup
# pickling. See https://stackoverflow.com/a/50888571/11249691
def __getstate__(self):
return vars(self)
def __setstate__(self, state):
vars(self).update(state)
def __getattr__(self, name):
raise InactiveWorkerGroupError()
def __len__(self):
raise InactiveWorkerGroupError()
def _get_session(method_name: str):
# Get the session for this worker.
session = get_session()
if not session:
# Session is not initialized yet.
raise TrainBackendError(
f"`{method_name}` has been called "
"before `start_training`. Please call "
"`start_training` before "
f"`{method_name}`."
)
return session
| InactiveWorkerGroup |
python | apache__airflow | task-sdk/src/airflow/sdk/bases/decorator.py | {
"start": 11850,
"end": 23794
} | class ____(ExpandableFactory, Generic[FParams, FReturn, OperatorSubclass]):
"""
Helper class for providing dynamic task mapping to decorated functions.
``task_decorator_factory`` returns an instance of this, instead of just a plain wrapped function.
:meta private:
"""
function: Callable[FParams, FReturn] = attr.ib(validator=attr.validators.is_callable())
operator_class: type[OperatorSubclass]
multiple_outputs: bool = attr.ib()
kwargs: dict[str, Any] = attr.ib(factory=dict)
decorator_name: str = attr.ib(repr=False, default="task")
_airflow_is_task_decorator: ClassVar[bool] = True
is_setup: bool = False
is_teardown: bool = False
on_failure_fail_dagrun: bool = False
# This is set in __attrs_post_init__ by update_wrapper. Provided here for type hints.
__wrapped__: Callable[FParams, FReturn] = attr.ib(init=False)
@multiple_outputs.default
def _infer_multiple_outputs(self):
if "return" not in self.function.__annotations__:
# No return type annotation, nothing to infer
return False
try:
# We only care about the return annotation, not anything about the parameters
def fake(): ...
fake.__annotations__ = {"return": self.function.__annotations__["return"]}
return_type = typing_extensions.get_type_hints(fake, self.function.__globals__).get("return", Any)
except NameError as e:
warnings.warn(
f"Cannot infer multiple_outputs for TaskFlow function {self.function.__name__!r} with forward"
f" type references that are not imported. (Error was {e})",
stacklevel=4,
)
return False
except TypeError: # Can't evaluate return type.
return False
ttype = getattr(return_type, "__origin__", return_type)
return isinstance(ttype, type) and issubclass(ttype, Mapping)
def __attrs_post_init__(self):
if "self" in self.function_signature.parameters:
raise TypeError(f"@{self.decorator_name} does not support methods")
self.kwargs.setdefault("task_id", self.function.__name__)
update_wrapper(self, self.function)
def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> XComArg:
if self.is_teardown:
if "trigger_rule" in self.kwargs:
raise ValueError("Trigger rule not configurable for teardown tasks.")
self.kwargs.update(trigger_rule=TriggerRule.ALL_DONE_SETUP_SUCCESS)
on_failure_fail_dagrun = self.kwargs.pop("on_failure_fail_dagrun", self.on_failure_fail_dagrun)
op = self.operator_class(
python_callable=self.function,
op_args=args,
op_kwargs=kwargs,
multiple_outputs=self.multiple_outputs,
**self.kwargs,
)
op.is_setup = self.is_setup
op.is_teardown = self.is_teardown
op.on_failure_fail_dagrun = on_failure_fail_dagrun
op_doc_attrs = [op.doc, op.doc_json, op.doc_md, op.doc_rst, op.doc_yaml]
# Set the task's doc_md to the function's docstring if it exists and no other doc* args are set.
if self.function.__doc__ and not any(op_doc_attrs):
op.doc_md = self.function.__doc__
return XComArg(op)
def _validate_arg_names(self, func: ValidationSource, kwargs: dict[str, Any]):
# Ensure that context variables are not shadowed.
context_keys_being_mapped = KNOWN_CONTEXT_KEYS.intersection(kwargs)
if len(context_keys_being_mapped) == 1:
(name,) = context_keys_being_mapped
raise ValueError(f"cannot call {func}() on task context variable {name!r}")
if context_keys_being_mapped:
names = ", ".join(repr(n) for n in context_keys_being_mapped)
raise ValueError(f"cannot call {func}() on task context variables {names}")
super()._validate_arg_names(func, kwargs)
def expand(self, **map_kwargs: OperatorExpandArgument) -> XComArg:
if self.kwargs.get("trigger_rule") == TriggerRule.ALWAYS and any(
[isinstance(expanded, XComArg) for expanded in map_kwargs.values()]
):
raise ValueError(
"Task-generated mapping within a task using 'expand' is not allowed with trigger rule 'always'."
)
if not map_kwargs:
raise TypeError("no arguments to expand against")
self._validate_arg_names("expand", map_kwargs)
prevent_duplicates(self.kwargs, map_kwargs, fail_reason="mapping already partial")
# Since the input is already checked at parse time, we can set strict
# to False to skip the checks on execution.
if self.is_teardown:
if "trigger_rule" in self.kwargs:
raise ValueError("Trigger rule not configurable for teardown tasks.")
self.kwargs.update(trigger_rule=TriggerRule.ALL_DONE_SETUP_SUCCESS)
return self._expand(DictOfListsExpandInput(map_kwargs), strict=False)
def expand_kwargs(self, kwargs: OperatorExpandKwargsArgument, *, strict: bool = True) -> XComArg:
if (
self.kwargs.get("trigger_rule") == TriggerRule.ALWAYS
and not isinstance(kwargs, XComArg)
and any(
[
isinstance(v, XComArg)
for kwarg in kwargs
if not isinstance(kwarg, XComArg)
for v in kwarg.values()
]
)
):
raise ValueError(
"Task-generated mapping within a task using 'expand_kwargs' is not allowed with trigger rule 'always'."
)
if isinstance(kwargs, Sequence):
for item in kwargs:
if not isinstance(item, (XComArg, Mapping)):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
elif not isinstance(kwargs, XComArg):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
return self._expand(ListOfDictsExpandInput(kwargs), strict=strict)
def _expand(self, expand_input: ExpandInput, *, strict: bool) -> XComArg:
ensure_xcomarg_return_value(expand_input.value)
task_kwargs = self.kwargs.copy()
dag = task_kwargs.pop("dag", None) or DagContext.get_current()
task_group = task_kwargs.pop("task_group", None) or TaskGroupContext.get_current(dag)
default_args, partial_params = get_merged_defaults(
dag=dag,
task_group=task_group,
task_params=task_kwargs.pop("params", None),
task_default_args=task_kwargs.pop("default_args", None),
)
partial_kwargs: dict[str, Any] = {
"is_setup": self.is_setup,
"is_teardown": self.is_teardown,
"on_failure_fail_dagrun": self.on_failure_fail_dagrun,
}
base_signature = inspect.signature(BaseOperator)
ignore = {
"default_args", # This is target we are working on now.
"kwargs", # A common name for a keyword argument.
"do_xcom_push", # In the same boat as `multiple_outputs`
"multiple_outputs", # We will use `self.multiple_outputs` instead.
"params", # Already handled above `partial_params`.
"task_concurrency", # Deprecated(replaced by `max_active_tis_per_dag`).
}
partial_keys = set(base_signature.parameters) - ignore
partial_kwargs.update({key: value for key, value in default_args.items() if key in partial_keys})
partial_kwargs.update(task_kwargs)
task_id = get_unique_task_id(partial_kwargs.pop("task_id"), dag, task_group)
if task_group:
task_id = task_group.child_id(task_id)
# Logic here should be kept in sync with BaseOperatorMeta.partial().
if partial_kwargs.get("wait_for_downstream"):
partial_kwargs["depends_on_past"] = True
start_date = timezone.convert_to_utc(partial_kwargs.pop("start_date", None))
end_date = timezone.convert_to_utc(partial_kwargs.pop("end_date", None))
if "pool_slots" in partial_kwargs:
if partial_kwargs["pool_slots"] < 1:
dag_str = ""
if dag:
dag_str = f" in dag {dag.dag_id}"
raise ValueError(f"pool slots for {task_id}{dag_str} cannot be less than 1")
for fld, convert in (
("retries", parse_retries),
("retry_delay", coerce_timedelta),
("max_retry_delay", coerce_timedelta),
("resources", coerce_resources),
):
if (v := partial_kwargs.get(fld, NOTSET)) is not NOTSET:
partial_kwargs[fld] = convert(v)
partial_kwargs.setdefault("executor_config", {})
partial_kwargs.setdefault("op_args", [])
partial_kwargs.setdefault("op_kwargs", {})
# Mypy does not work well with a subclassed attrs class :(
_MappedOperator = cast("Any", DecoratedMappedOperator)
try:
operator_name = self.operator_class.custom_operator_name # type: ignore
except AttributeError:
operator_name = self.operator_class.__name__
operator = _MappedOperator(
operator_class=self.operator_class,
expand_input=EXPAND_INPUT_EMPTY, # Don't use this; mapped values go to op_kwargs_expand_input.
partial_kwargs=partial_kwargs,
task_id=task_id,
params=partial_params,
operator_extra_links=self.operator_class.operator_extra_links,
template_ext=self.operator_class.template_ext,
template_fields=self.operator_class.template_fields,
template_fields_renderers=self.operator_class.template_fields_renderers,
ui_color=self.operator_class.ui_color,
ui_fgcolor=self.operator_class.ui_fgcolor,
is_empty=False,
is_sensor=self.operator_class._is_sensor,
can_skip_downstream=self.operator_class._can_skip_downstream,
task_module=self.operator_class.__module__,
task_type=self.operator_class.__name__,
operator_name=operator_name,
dag=dag,
task_group=task_group,
start_date=start_date,
end_date=end_date,
multiple_outputs=self.multiple_outputs,
python_callable=self.function,
op_kwargs_expand_input=expand_input,
disallow_kwargs_override=strict,
# Different from classic operators, kwargs passed to a taskflow
# task's expand() contribute to the op_kwargs operator argument, not
# the operator arguments themselves, and should expand against it.
expand_input_attr="op_kwargs_expand_input",
start_trigger_args=self.operator_class.start_trigger_args,
start_from_trigger=self.operator_class.start_from_trigger,
)
return XComArg(operator=operator)
def partial(self, **kwargs: Any) -> _TaskDecorator[FParams, FReturn, OperatorSubclass]:
self._validate_arg_names("partial", kwargs)
old_kwargs = self.kwargs.get("op_kwargs", {})
prevent_duplicates(old_kwargs, kwargs, fail_reason="duplicate partial")
kwargs.update(old_kwargs)
return attr.evolve(self, kwargs={**self.kwargs, "op_kwargs": kwargs})
def override(self, **kwargs: Any) -> _TaskDecorator[FParams, FReturn, OperatorSubclass]:
    """Return a copy of this decorator with *kwargs* overriding the stored kwargs."""
    clone = attr.evolve(self, kwargs={**self.kwargs, **kwargs})
    # attr.evolve() does not carry these non-init attributes over, so copy
    # them across explicitly.
    for flag in ("is_setup", "is_teardown", "on_failure_fail_dagrun"):
        setattr(clone, flag, getattr(self, flag))
    return clone
@attr.define(kw_only=True, repr=False)
| _TaskDecorator |
python | pytorch__pytorch | torch/onnx/errors.py | {
"start": 484,
"end": 1672
class ____(OnnxExporterError):
    """Raised when an operator is unsupported by the exporter."""

    # NOTE: This is legacy and is only used by the torchscript exporter
    # Clean up when the torchscript exporter is removed
    def __init__(self, name: str, version: int, supported_version: int | None) -> None:
        known_namespaces = ("aten::", "prim::", "quantized::")
        if supported_version is not None:
            # The operator exists but only at a later opset version.
            diagnostic = (
                f"Exporting the operator '{name}' to ONNX opset version {version} "
                "is not supported. Support for this operator was added in version "
                f"{supported_version}, try exporting with this version"
            )
        elif name.startswith(known_namespaces):
            # Recognized namespace, but no symbolic function is registered.
            diagnostic = (
                f"Exporting the operator '{name}' to ONNX opset version {version} "
                "is not supported"
            )
        else:
            # Unrecognized namespace: most likely a custom-op registration issue.
            diagnostic = (
                f"ONNX export failed on an operator with unrecognized namespace {name}. "
                "If you are trying to export a custom operator, make sure you registered it with "
                "the right domain and version."
            )
        super().__init__(diagnostic)
| UnsupportedOperatorError |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_clicks_index.py | {
"start": 1992,
"end": 9746
class ____(ProjectEndpoint):
    """Expose the DOM node-ids clicked during a single replay."""

    owner = ApiOwner.REPLAY
    # Only GET is published; there are no mutating verbs on this endpoint.
    publish_status = {
        "GET": ApiPublishStatus.PUBLIC,
    }

    @extend_schema(
        operation_id="List Clicked Nodes",
        parameters=[
            CursorQueryParam,
            GlobalParams.ORG_ID_OR_SLUG,
            GlobalParams.PROJECT_ID_OR_SLUG,
            GlobalParams.ENVIRONMENT,
            ReplayParams.REPLAY_ID,
            VisibilityParams.PER_PAGE,
            VisibilityParams.QUERY,
        ],
        responses={
            200: inline_sentry_response_serializer("ListReplayClicks", ReplayClickResponse),
            400: RESPONSE_BAD_REQUEST,
            403: RESPONSE_FORBIDDEN,
            404: RESPONSE_NOT_FOUND,
        },
        examples=ReplayExamples.GET_REPLAY_CLICKS,
    )
    def get(self, request: Request, project: Project, replay_id: str) -> Response:
        """Retrieve a collection of RRWeb DOM node-ids and the timestamp they were clicked."""
        # Hide the endpoint entirely when the feature flag is off for the org.
        if not features.has(
            "organizations:session-replay", project.organization, actor=request.user
        ):
            return Response(status=404)

        filter_params = self.get_filter_params(request, project)

        # Normalize the replay id; a malformed UUID is treated as "not found"
        # rather than a client error.
        try:
            replay_id = str(uuid.UUID(replay_id))
        except ValueError:
            return Response(status=404)

        def data_fn(offset, limit):
            # Page-fetch callback for the paginator: parse the user query and
            # run the clicks query for this window.
            try:
                search_filters = parse_search_query(request.query_params.get("query", ""))
            except InvalidSearchQuery as e:
                raise ParseError(str(e))

            return query_replay_clicks(
                project_id=filter_params["project_id"][0],
                replay_id=replay_id,
                start=filter_params["start"],
                end=filter_params["end"],
                limit=limit,
                offset=offset,
                search_filters=search_filters,
                organization_id=project.organization.id,
            )

        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=lambda results: {"data": results["data"]},
        )
def query_replay_clicks(
    project_id: int,
    replay_id: str,
    start: datetime.datetime,
    end: datetime.datetime,
    limit: int,
    offset: int,
    search_filters: Sequence[QueryToken],
    organization_id: int,
):
    """Query replay clicks.

    This query is atypical: it does not aggregate by replay_id and it is not
    exposed as a user-facing endpoint. It lets the replays client fetch click
    information for queries that were written against the (aggregated) replays
    index endpoint, so conditions meant for an aggregated query must be
    translated for a non-aggregated one. Concretely, most ANDs become logical
    ORs and negation queries do not logically filter any results.

    Why do most ANDs become ORs? The query has been pre-validated to contain
    the result; we only need to find the component rows that produced the
    match. A single row could satisfy every condition, or multiple rows could
    each satisfy one independently — either constitutes a successful response.
    (Selector matches remain AND'ed because they require a single row to match
    all conditions.)

    Why do negation queries have no impact? If the aggregated result does not
    contain a condition (e.g. tag = "button") then no row in the subset can
    logically contain it either; such conditions are effectively inert here.

    Why not just aggregate here? Aggregation precludes pagination. There is no
    other reason.
    """
    click_conditions = handle_search_filters(click_search_config, search_filters)
    if len(click_conditions) > 1:
        # Collapse the user's conditions into a single OR (see docstring).
        click_conditions = [Or(click_conditions)]

    where_clause = [
        Condition(Column("project_id"), Op.EQ, project_id),
        Condition(Column("timestamp"), Op.GTE, start),
        Condition(Column("timestamp"), Op.LT, end),
        Condition(Column("replay_id"), Op.EQ, replay_id),
        # Rows with an empty click_tag are not click rows at all.
        Condition(Column("click_tag"), Op.NEQ, ""),
        *click_conditions,
    ]

    snuba_request = Request(
        dataset="replays",
        app_id="replay-backend-web",
        query=Query(
            match=Entity("replays"),
            select=[
                Function("identity", parameters=[Column("click_node_id")], alias="node_id"),
                Column("timestamp"),
            ],
            where=where_clause,
            orderby=[OrderBy(Column("timestamp"), Direction.ASC)],
            limit=Limit(limit),
            offset=Offset(offset),
            granularity=Granularity(3600),
        ),
        tenant_ids={"organization_id": organization_id, "referrer": "replay-backend-web"},
    )
    return raw_snql_query(snuba_request, REFERRER)
# TODO: Can this be abstracted in some way so the override does not need to redefine the whole
# function?
#
# Identical to the original handle_search_filters function except `And` operations are
# transformed into `Or` operations.
def handle_search_filters(
    search_config: dict[str, FieldProtocol],
    search_filters: Sequence[QueryToken],
) -> list[Condition]:
    """Convert search filters to snuba conditions.

    Identical to the generic handler except that sibling conditions are
    combined with a logical OR regardless of whether the user wrote AND or
    OR: in this non-aggregated context the two are intentionally equivalent
    (see query_replay_clicks for the rationale).
    """
    result: list[Condition] = []
    look_back = None
    for search_filter in search_filters:
        if isinstance(search_filter, SearchFilter):
            # Translate the filter, mapping translation failures onto 400s.
            try:
                condition = search_filter_to_condition(search_config, search_filter)
                if condition is None:
                    raise ParseError(f"Unsupported search field: {search_filter.key.name}")
            except OperatorNotSupported:
                raise ParseError(f"Invalid operator specified for `{search_filter.key.name}`")
            except CouldNotParseValue:
                raise ParseError(f"Could not parse value for `{search_filter.key.name}`")

            # Both AND and OR look-backs collapse to a logical OR here; a
            # condition with no look-back is implicitly AND'ed by the
            # surrounding WHERE clause.
            if look_back in ("AND", "OR"):
                look_back = None
                attempt_compressed_condition(result, condition, Or)
            else:
                result.append(condition)
        elif isinstance(search_filter, ParenExpression):
            # Parenthesized groups are computed recursively; two or more
            # resulting conditions are OR'ed together.
            grouped = handle_search_filters(search_config, search_filter.children)
            if len(grouped) < 2:
                result.extend(grouped)
            else:
                result.append(Or(grouped))
        elif isinstance(search_filter, str):
            # A bare operator string becomes the look-back for the next
            # filter; anything unrecognized is implicitly ignored.
            look_back = search_filter
    return result
| ProjectReplayClicksIndexEndpoint |
python | spack__spack | var/spack/test_repos/spack_repo/tutorial/packages/mpich/package.py | {
"start": 239,
"end": 5524
class ____(AutotoolsPackage):
    """MPICH is a high performance and widely portable implementation of
    the Message Passing Interface (MPI) standard."""

    homepage = "http://www.mpich.org"
    url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
    git = "https://github.com/pmodels/mpich.git"

    # Let spack discover newer releases by crawling one level of the
    # downloads index page.
    list_url = "http://www.mpich.org/static/downloads/"
    list_depth = 1

    version("develop", submodules=True)
    version("3.2.1", md5="e175452f4d61646a52c73031683fc375")
    version("3.2", md5="f414cfa77099cd1fa1a5ae4e22db508a")
    version("3.1.4", md5="2ab544607986486562e076b83937bba2")
    version("3.1.3", md5="93cb17f91ac758cbf9174ecb03563778")
    version("3.1.2", md5="7fbf4b81dcb74b07ae85939d1ceee7f1")
    version("3.1.1", md5="40dc408b1e03cc36d80209baaa2d32b7")
    version("3.1", md5="5643dd176499bfb7d25079aaff25f2ec")
    version("3.0.4", md5="9c5d5d4fe1e17dd12153f40bc5b6dbc0")

    variant("hydra", default=True, description="Build the hydra process manager")
    variant("pmi", default=True, description="Build with PMI support")
    variant("romio", default=True, description="Enable ROMIO MPI I/O implementation")
    variant("verbs", default=False, description="Build support for OpenFabrics verbs.")
    variant(
        "device",
        default="ch3",
        description="""Abstract Device Interface (ADI)
        implementation. The ch4 device is currently in experimental state""",
        values=("ch3", "ch4"),
        multi=False,
    )
    variant(
        "netmod",
        default="tcp",
        description="""Network module. Only single netmod builds are
        supported. For ch3 device configurations, this presumes the
        ch3:nemesis communication channel. ch3:sock is not supported by this
        spack package at this time.""",
        values=("tcp", "mxm", "ofi", "ucx"),
        multi=False,
    )

    # This package satisfies the virtual 'mpi' dependency; the MPI standard
    # version provided depends on the MPICH release installed.
    provides("mpi")
    provides("mpi@:3.0", when="@3:")
    provides("mpi@:1.3", when="@1:")

    filter_compiler_wrappers("mpicc", "mpicxx", "mpif77", "mpif90", "mpifort", relative_root="bin")

    # fix MPI_Barrier segmentation fault
    # see https://lists.mpich.org/pipermail/discuss/2016-May/004764.html
    # and https://lists.mpich.org/pipermail/discuss/2016-June/004768.html
    patch("mpich32_clang.patch", when="@=3.2%clang")

    depends_on("findutils", type="build")
    depends_on("libfabric", when="netmod=ofi")

    # Invalid device/netmod combinations for the supported releases.
    conflicts("device=ch4", when="@:3.2")
    conflicts("netmod=ofi", when="@:3.1.4")
    conflicts("netmod=ucx", when="device=ch3")
    conflicts("netmod=mxm", when="device=ch4")
    conflicts("netmod=mxm", when="@:3.1.3")
    conflicts("netmod=tcp", when="device=ch4")

    def setup_dependent_build_environment(
        self, env: EnvironmentModifications, dependent_spec: Spec
    ) -> None:
        """Export MPI wrapper/compiler variables into dependents' build envs.

        Intentionally a no-op: filling it in is the tutorial exercise.
        """
        # TUTORIAL: set the following variables for dependents:
        #
        # MPICC=join_path(self.prefix.bin, 'mpicc')
        # MPICXX=join_path(self.prefix.bin, 'mpic++')
        # MPIF77=join_path(self.prefix.bin, 'mpif77')
        # MPIF90=join_path(self.prefix.bin, 'mpif90')
        # MPICH_CC=spack_cc
        # MPICH_CXX=spack_cxx
        # MPICH_F77=spack_f77
        # MPICH_F90=spack_fc
        # MPICH_FC=spack_fc
        pass

    def setup_dependent_package(self, module, dependent_spec):
        # Expose the wrapper paths on the spec so dependents can compile
        # with them (spec['mpi'].mpicc, etc.).
        self.spec.mpicc = join_path(self.prefix.bin, "mpicc")
        self.spec.mpicxx = join_path(self.prefix.bin, "mpic++")
        self.spec.mpifc = join_path(self.prefix.bin, "mpif90")
        self.spec.mpif77 = join_path(self.prefix.bin, "mpif77")

    def autoreconf(self, spec, prefix):
        """Not needed usually, configure should be already there"""
        # If configure exists nothing needs to be done
        if os.path.exists(self.configure_abs_path):
            return
        # Else bootstrap with autotools
        bash = which("bash")
        bash("./autogen.sh")

    @run_before("autoreconf")
    def die_without_fortran(self):
        # Until we can pass variants such as +fortran through virtual
        # dependencies depends_on('mpi'), require Fortran compiler to
        # avoid delayed build errors in dependents.
        if (self.compiler.f77 is None) or (self.compiler.fc is None):
            raise InstallError("Mpich requires both C and Fortran compilers!")

    def configure_args(self):
        """Build the ./configure argument list from the spec's variants."""
        spec = self.spec
        config_args = [
            "--enable-shared",
            "--with-pm={0}".format("hydra" if "+hydra" in spec else "no"),
            "--with-pmi={0}".format("yes" if "+pmi" in spec else "no"),
            "--{0}-romio".format("enable" if "+romio" in spec else "disable"),
            "--{0}-ibverbs".format("with" if "+verbs" in spec else "without"),
        ]

        # setup device configuration
        # The device flag is "<device>:<channel-prefix><netmod>"; ch3 always
        # uses the nemesis channel in this package.
        device_config = ""
        if "device=ch4" in spec:
            device_config = "--with-device=ch4:"
        elif "device=ch3" in spec:
            device_config = "--with-device=ch3:nemesis:"

        if "netmod=ucx" in spec:
            device_config += "ucx"
        elif "netmod=ofi" in spec:
            device_config += "ofi"
        elif "netmod=mxm" in spec:
            device_config += "mxm"
        elif "netmod=tcp" in spec:
            device_config += "tcp"

        config_args.append(device_config)

        return config_args
| Mpich |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.