language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scrapy__scrapy | tests/test_command_crawl.py | {
"start": 1877,
"end": 2721
} | class ____(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug(
'FEEDS: {}'.format(
json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)
)
)
return
yield
"""
j = proj_path / "example.json"
j.write_text("not empty", encoding="utf-8")
args = ["-O", "example.json"]
log = self.get_log(spider_code, proj_path, args=args)
assert (
'[myspider] DEBUG: FEEDS: {"example.json": {"format": "json", "overwrite": true}}'
in log
)
with j.open(encoding="utf-8") as f2:
first_line = f2.readline()
assert first_line != "not empty"
def test_output_and_overwrite_output(self, proj_path: Path) -> None:
spider_code = """
import scrapy
| MySpider |
python | getsentry__sentry | src/sentry/models/groupowner.py | {
"start": 3543,
"end": 9560
} | class ____(Model):
"""
Tracks the "owners" or "suggested assignees" of a group.
"""
__relocation_scope__ = RelocationScope.Excluded
group = FlexibleForeignKey("sentry.Group", db_constraint=False)
project = FlexibleForeignKey("sentry.Project", db_constraint=False)
organization = FlexibleForeignKey("sentry.Organization", db_constraint=False)
type = models.PositiveSmallIntegerField(
choices=(
(GroupOwnerType.SUSPECT_COMMIT, "Suspect Commit"),
(GroupOwnerType.OWNERSHIP_RULE, "Ownership Rule"),
(GroupOwnerType.CODEOWNERS, "Codeowners"),
)
)
context = LegacyTextJSONField(null=True)
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
team = FlexibleForeignKey("sentry.Team", null=True)
date_added = models.DateTimeField(default=timezone.now)
objects: ClassVar[GroupOwnerManager] = GroupOwnerManager()
class Meta:
app_label = "sentry"
db_table = "sentry_groupowner"
indexes = [
models.Index(
F("type"),
Cast(
KeyTextTransform(
"commitId",
Cast(F("context"), models.JSONField()),
),
BigIntegerField(),
),
name="groupowner_type_json_commitid",
),
]
def save(self, *args, **kwargs):
keys = [k for k in (self.user_id, self.team_id) if k is not None]
assert len(keys) != 2, "Must have team or user or neither, not both"
super().save(*args, **kwargs)
def owner_id(self):
if self.user_id:
return f"user:{self.user_id}"
if self.team_id:
return f"team:{self.team_id}"
if not self.user_id and not self.team_id:
return None
raise NotImplementedError("Unknown Owner")
def owner(self):
from sentry.types.actor import Actor
if not self.owner_id():
return None
return Actor.from_identifier(self.owner_id())
@classmethod
def get_autoassigned_owner(cls, group_id, project_id, autoassignment_types):
"""
Non-cached read access to find the autoassigned GroupOwner.
"""
# Ordered by date_added as well to ensure that the first GroupOwner is returned
# Multiple GroupOwners can be created but they are created in the correct evaluation order, so the first one takes precedence
issue_owner = (
cls.objects.filter(
group_id=group_id, project_id=project_id, type__in=autoassignment_types
)
.exclude(user_id__isnull=True, team_id__isnull=True)
.order_by("type", "date_added")
.first()
)
# should return False if no owner
if issue_owner is None:
return False
return issue_owner
@classmethod
def invalidate_debounce_issue_owners_evaluation_cache(cls, project_id, group_id=None):
"""
If `group_id` is provided, clear the debounce issue owners cache for that group, else clear
the cache of all groups for a project that had an event within the
ISSUE_OWNERS_DEBOUNCE_DURATION window.
"""
if group_id:
cache.delete(ISSUE_OWNERS_DEBOUNCE_KEY(group_id))
return
# Get all the groups for a project that had an event within the ISSUE_OWNERS_DEBOUNCE_DURATION window.
# Any groups without events in that window would have expired their TTL in the cache.
queryset = Group.objects.filter(
project_id=project_id,
last_seen__gte=timezone.now() - timedelta(seconds=ISSUE_OWNERS_DEBOUNCE_DURATION),
).values_list("id", flat=True)
# Run cache invalidation in batches
group_id_iter = queryset.iterator(chunk_size=1000)
while True:
group_ids = list(itertools.islice(group_id_iter, 1000))
if not group_ids:
break
cache_keys = [ISSUE_OWNERS_DEBOUNCE_KEY(group_id) for group_id in group_ids]
cache.delete_many(cache_keys)
@classmethod
def invalidate_assignee_exists_cache(cls, project_id, group_id=None):
"""
If `group_id` is provided, clear the assignee exists cache for that group, else
clear the cache of all groups for a project that had an event within the
ASSIGNEE_EXISTS_DURATION window.
"""
if group_id:
cache.delete(ASSIGNEE_EXISTS_KEY(group_id))
return
# Get all the groups for a project that had an event within the ASSIGNEE_EXISTS_DURATION window.
# Any groups without events in that window would have expired their TTL in the cache.
queryset = Group.objects.filter(
project_id=project_id,
last_seen__gte=timezone.now() - timedelta(seconds=ASSIGNEE_EXISTS_DURATION),
).values_list("id", flat=True)
# Run cache invalidation in batches
group_id_iter = queryset.iterator(chunk_size=1000)
while True:
group_ids = list(itertools.islice(group_id_iter, 1000))
if not group_ids:
break
cache_keys = [ASSIGNEE_EXISTS_KEY(group_id) for group_id in group_ids]
cache.delete_many(cache_keys)
def get_owner_details(group_list: Sequence[Group]) -> dict[int, list[OwnersSerialized]]:
group_ids = [g.id for g in group_list]
group_owners = GroupOwner.objects.filter(group__in=group_ids).exclude(
user_id__isnull=True, team_id__isnull=True
)
owner_details = defaultdict(list)
for go in group_owners:
owner_details[go.group_id].append(
OwnersSerialized(
type=GROUP_OWNER_TYPE[GroupOwnerType(go.type)],
owner=go.owner().identifier,
date_added=go.date_added,
),
)
return owner_details
| GroupOwner |
python | kamyu104__LeetCode-Solutions | Python/repeated-dna-sequences.py | {
"start": 49,
"end": 888
} | class ____(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
dict, rolling_hash, res = {}, 0, []
for i in xrange(len(s)):
rolling_hash = ((rolling_hash << 3) & 0x3fffffff) | (ord(s[i]) & 7)
if rolling_hash not in dict:
dict[rolling_hash] = True
elif dict[rolling_hash]:
res.append(s[i - 9: i + 1])
dict[rolling_hash] = False
return res
def findRepeatedDnaSequences2(self, s):
"""
:type s: str
:rtype: List[str]
"""
l, r = [], []
if len(s) < 10: return []
for i in range(len(s) - 9):
l.extend([s[i:i + 10]])
return [k for k, v in collections.Counter(l).items() if v > 1]
| Solution |
python | google__pytype | pytype/datatypes.py | {
"start": 334,
"end": 3479
} | class ____:
r"""A disjoint-set data structure for `AliasingDict`.
This is used to record the alias information for `AliasingDict`. It is
consist of different components. Each component will contain the names
that represent the same thing.
E.g., for a five-node component/tree, the representative for all the
nodes in the component is `T`:
T [T] The root node and representative
/ \ [U] Its parent is `T`
U V [V] Its parent is `T`
/ \ [W] Its parent is `V`
W X [X] Its parent is `V`
For performance consideration, we will compress the path each time when
we compute the representative of a node. E.g., if we try to get the
representative of node `W`, then the above tree will become:
T
/|\
U W V
\
X
Attributes:
name2id: mapping all names to unique id.
parent: the parent id of current unique id.
rank: the height of the tree for corresponding component, it is an
optimization to merge two components.
id2name: mapping unique id to corresponding names, the reverse map of
`name2id`.
latest_id: the maximal allocated id.
"""
def __init__(self):
self.name2id = {}
self.parent = []
self.rank = []
self.id2name = []
self.latest_id = 0
def merge_from(self, uf):
"""Merge a UnionFind into the current one."""
for i, name in enumerate(uf.id2name):
self.merge(name, uf.id2name[uf.parent[i]])
def find_by_name(self, name):
"""Find the representative of a component represented by given name."""
key = self._get_or_add_id(name)
return self.id2name[self._find(key)]
def merge(self, name1, name2):
"""Merge two components represented by the given names."""
key1 = self._get_or_add_id(name1)
key2 = self._get_or_add_id(name2)
self._merge(key1, key2)
return self.find_by_name(name1)
def _get_or_add_id(self, name):
if name not in self.name2id:
self.name2id[name] = self.latest_id
self.parent.append(self.latest_id)
self.rank.append(1)
self.id2name.append(name)
self.latest_id += 1
return self.name2id[name]
def _find(self, key):
"""Find the tree root."""
assert self.latest_id > key
res = key
if self.parent[key] != key:
res = self._find(self.parent[key])
# Compress/Optimize the search path
self.parent[key] = res
return res
def _merge(self, k1, k2):
"""Merge two components."""
assert self.latest_id > k1 and self.latest_id > k2
s1 = self._find(k1)
s2 = self._find(k2)
if s1 != s2:
if self.rank[s1] > self.rank[s2]:
self.parent[s2] = s1
elif self.rank[s1] < self.rank[s2]:
self.parent[s1] = s2
else:
self.parent[s1] = s2
self.rank[s2] += 1
def __repr__(self):
comps = []
used = set()
for x in self.id2name:
if x not in used:
comp = []
for y in self.id2name:
if self.find_by_name(x) == self.find_by_name(y):
used.add(y)
comp.append(y)
comps.append(comp)
return repr(comps)
| UnionFind |
python | marshmallow-code__marshmallow | tests/test_decorators.py | {
"start": 30022,
"end": 30090
} | class ____:
def __init__(self, foo):
self.foo = foo
| Nested |
python | pytorch__pytorch | torch/package/_mangling.py | {
"start": 113,
"end": 1892
} | class ____:
"""
Used on import, to ensure that all modules imported have a shared mangle parent.
"""
def __init__(self) -> None:
global _mangle_index
self._mangle_index = _mangle_index
# Increment the global index
_mangle_index += 1
# Angle brackets are used so that there is almost no chance of
# confusing this module for a real module. Plus, it is Python's
# preferred way of denoting special modules.
self._mangle_parent = f"<torch_package_{self._mangle_index}>"
def mangle(self, name) -> str:
assert len(name) != 0
return self._mangle_parent + "." + name
def demangle(self, mangled: str) -> str:
"""
Note: This only demangles names that were mangled by this specific
PackageMangler. It will pass through names created by a different
PackageMangler instance.
"""
if mangled.startswith(self._mangle_parent + "."):
return mangled.partition(".")[2]
# wasn't a mangled name
return mangled
def parent_name(self):
return self._mangle_parent
def is_mangled(name: str) -> bool:
return bool(re.match(r"<torch_package_\d+>", name))
def demangle(name: str) -> str:
"""
Note: Unlike PackageMangler.demangle, this version works on any
mangled name, irrespective of which PackageMangler created it.
"""
if is_mangled(name):
_first, sep, last = name.partition(".")
# If there is only a base mangle prefix, e.g. '<torch_package_0>',
# then return an empty string.
return last if len(sep) != 0 else ""
return name
def get_mangle_prefix(name: str) -> str:
return name.partition(".")[0] if is_mangled(name) else name
| PackageMangler |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 3827,
"end": 4083
} | class ____(models.Model):
"""Model with type-annotated abstract = True using regular Meta - should not trigger DJ008"""
new_field = models.CharField(max_length=10)
class Meta:
abstract: ClassVar[bool] = True
| TypeAnnotatedAbstractModel2 |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ecs/tasks.py | {
"start": 15341,
"end": 18443
} | class ____(
NamedTuple("_CurrentEcsTaskMetadata", [("cluster", str), ("task_arn", str)])
):
pass
def get_current_ecs_task_metadata() -> CurrentEcsTaskMetadata:
task_metadata_uri = _container_metadata_uri() + "/task" # pyright: ignore[reportOptionalOperand]
response = requests.get(task_metadata_uri).json()
cluster = response.get("Cluster")
task_arn = response.get("TaskARN")
return CurrentEcsTaskMetadata(cluster=cluster, task_arn=task_arn)
def _container_metadata_uri():
"""Get the metadata uri for the current ECS task.
ECS injects an environment variable into each Fargate task. The value
of this environment variable is a url that can be queried to introspect
information about the current processes's running task:
https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-metadata-endpoint-v4-fargate.html
"""
return os.environ.get("ECS_CONTAINER_METADATA_URI_V4")
def current_ecs_container_name():
return requests.get(_container_metadata_uri()).json()["Name"] # pyright: ignore[reportArgumentType]
def get_current_ecs_task(ecs, task_arn, cluster):
def describe_task_or_raise(task_arn, cluster):
try:
return ecs.describe_tasks(tasks=[task_arn], cluster=cluster)["tasks"][0]
except IndexError:
raise EcsNoTasksFound
try:
task = backoff(
describe_task_or_raise,
retry_on=(EcsNoTasksFound,),
kwargs={"task_arn": task_arn, "cluster": cluster},
max_retries=BACKOFF_RETRIES,
)
except EcsNoTasksFound:
raise EcsEventualConsistencyTimeout
return task
def get_task_kwargs_from_current_task(
ec2,
cluster,
task,
):
run_task_kwargs = {"cluster": cluster}
if not task.get("capacityProviderStrategy"):
run_task_kwargs["launchType"] = task.get("launchType") or "FARGATE"
else:
run_task_kwargs["capacityProviderStrategy"] = task.get("capacityProviderStrategy")
if run_task_kwargs["launchType"] != "EXTERNAL":
enis = []
subnets = []
for attachment in task["attachments"]:
if attachment["type"] == "ElasticNetworkInterface":
for detail in attachment["details"]:
if detail["name"] == "subnetId":
subnets.append(detail["value"])
if detail["name"] == "networkInterfaceId":
enis.append(ec2.NetworkInterface(detail["value"]))
public_ip = False
security_groups = []
for eni in enis:
if (eni.association_attribute or {}).get("PublicIp"):
public_ip = True
for group in eni.groups:
security_groups.append(group["GroupId"])
aws_vpc_config = {
"subnets": subnets,
"assignPublicIp": "ENABLED" if public_ip else "DISABLED",
"securityGroups": security_groups,
}
run_task_kwargs["networkConfiguration"] = {"awsvpcConfiguration": aws_vpc_config}
return run_task_kwargs
| CurrentEcsTaskMetadata |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 23639,
"end": 24164
} | class ____(_FilterTestCommon):
"""Apply a filter to an expression. ``name`` is the name of the
filter, the other fields are the same as :class:`Call`.
If ``node`` is ``None``, the filter is being used in a filter block
and is applied to the content of the block.
"""
node: t.Optional[Expr] # type: ignore
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.node is None:
raise Impossible()
return super().as_const(eval_ctx=eval_ctx)
| Filter |
python | sphinx-doc__sphinx | tests/test_util/test_util_typing.py | {
"start": 1675,
"end": 1756
} | class ____(Enum):
a = 1
T = TypeVar('T')
MyInt = NewType('MyInt', int)
| MyEnum |
python | ray-project__ray | python/ray/data/tests/test_transform_pyarrow.py | {
"start": 28608,
"end": 107282
} | class ____:
pass
def _create_dataset(op, data):
ds = ray.data.range(2, override_num_blocks=2)
if op == "map":
def map(x):
return {
"id": x["id"],
"my_data": data[x["id"]],
}
ds = ds.map(map)
else:
assert op == "map_batches"
def map_batches(x):
row_id = x["id"][0]
return {
"id": x["id"],
"my_data": [data[row_id]],
}
ds = ds.map_batches(map_batches, batch_size=None)
# Needed for the map_batches case to trigger the error,
# because the error happens when merging the blocks.
ds = ds.map_batches(lambda x: x, batch_size=2)
return ds
@pytest.mark.skipif(
_object_extension_type_allowed(), reason="Arrow table supports pickled objects"
)
@pytest.mark.parametrize(
"op, data",
[
("map", [UnsupportedType(), 1]),
("map_batches", [None, 1]),
("map_batches", [{"a": 1}, {"a": 2}]),
],
)
def test_fallback_to_pandas_on_incompatible_data(
op,
data,
ray_start_regular_shared,
):
# Test if the first UDF output is incompatible with Arrow,
# Ray Data will fall back to using Pandas.
ds = _create_dataset(op, data)
ds = ds.materialize()
bundles = ds.iter_internal_ref_bundles()
block = ray.get(next(bundles).block_refs[0])
assert isinstance(block, pd.DataFrame)
_PYARROW_SUPPORTS_TYPE_PROMOTION = (
get_pyarrow_version() >= MIN_PYARROW_VERSION_TYPE_PROMOTION
)
@pytest.mark.parametrize(
"op, data, should_fail, expected_type",
[
# Case A: Upon serializing to Arrow fallback to `ArrowPythonObjectType`
("map_batches", [1, 2**100], False, ArrowPythonObjectType()),
("map_batches", [1.0, 2**100], False, ArrowPythonObjectType()),
("map_batches", ["1.0", 2**100], False, ArrowPythonObjectType()),
# Case B: No fallback to `ArrowPythonObjectType`, but type promotion allows
# int to be promoted to a double
(
"map_batches",
[1.0, 2**4],
not _PYARROW_SUPPORTS_TYPE_PROMOTION,
pa.float64(),
),
# Case C: No fallback to `ArrowPythonObjectType` and no type promotion possible
("map_batches", ["1.0", 2**4], True, None),
],
)
def test_pyarrow_conversion_error_handling(
ray_start_regular_shared,
op,
data,
should_fail: bool,
expected_type: pa.DataType,
):
# Ray Data infers the block type (arrow or pandas) and the block schema
# based on the first *block* produced by UDF.
#
# These tests simulate following scenarios
# 1. (Case A) Type of the value of the first block is deduced as Arrow scalar
# type, but second block carries value that overflows pa.int64 representation,
# and column henceforth will be serialized as `ArrowPythonObjectExtensionType`
# coercing first block to it as well
# 2. (Case B) Both blocks carry proper Arrow scalars which, however, have
# diverging types and therefore Arrow fails during merging of these blocks
# into 1
ds = _create_dataset(op, data)
if should_fail:
with pytest.raises(Exception) as e:
ds.materialize()
error_msg = str(e.value)
expected_msg = "ArrowConversionError: Error converting data to Arrow:"
assert expected_msg in error_msg
assert "my_data" in error_msg
else:
ds.materialize()
assert ds.schema().base_schema == pa.schema(
[pa.field("id", pa.int64()), pa.field("my_data", expected_type)]
)
assert ds.take_all() == [
{"id": i, "my_data": data[i]} for i in range(len(data))
]
def test_mixed_tensor_types_same_dtype(
mixed_tensor_types_same_dtype_blocks, mixed_tensor_types_same_dtype_expected
):
"""Test mixed tensor types with same data type but different shapes."""
t1, t2 = mixed_tensor_types_same_dtype_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == mixed_tensor_types_same_dtype_expected["length"]
# Verify schema - should have tensor field as variable-shaped
assert t3.schema == mixed_tensor_types_same_dtype_expected["schema"]
tensor_field = t3.schema.field("tensor")
assert isinstance(tensor_field.type, ArrowVariableShapedTensorType)
# Verify content
result_tensors = t3.column("tensor").to_pylist()
assert len(result_tensors) == mixed_tensor_types_same_dtype_expected["length"]
expected_tensors = mixed_tensor_types_same_dtype_expected["tensor_values"]
# Verify each tensor
for i, (result_tensor, expected_tensor) in enumerate(
zip(result_tensors, expected_tensors)
):
assert isinstance(result_tensor, np.ndarray)
assert result_tensor.shape == expected_tensor.shape
assert result_tensor.dtype == expected_tensor.dtype
np.testing.assert_array_equal(result_tensor, expected_tensor)
def test_mixed_tensor_types_fixed_shape_different(
mixed_tensor_types_fixed_shape_blocks, mixed_tensor_types_fixed_shape_expected
):
"""Test mixed tensor types with different fixed shapes."""
t1, t2 = mixed_tensor_types_fixed_shape_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == mixed_tensor_types_fixed_shape_expected["length"]
# Verify schema - should have tensor field as variable-shaped
assert t3.schema == mixed_tensor_types_fixed_shape_expected["schema"]
tensor_field = t3.schema.field("tensor")
assert isinstance(tensor_field.type, ArrowVariableShapedTensorType)
# Verify content
result_tensors = t3.column("tensor").to_pylist()
assert len(result_tensors) == mixed_tensor_types_fixed_shape_expected["length"]
expected_tensors = mixed_tensor_types_fixed_shape_expected["tensor_values"]
# Verify each tensor
for i, (result_tensor, expected_tensor) in enumerate(
zip(result_tensors, expected_tensors)
):
assert isinstance(result_tensor, np.ndarray)
assert result_tensor.shape == expected_tensor.shape
assert result_tensor.dtype == expected_tensor.dtype
np.testing.assert_array_equal(result_tensor, expected_tensor)
def test_mixed_tensor_types_variable_shaped(
mixed_tensor_types_variable_shaped_blocks,
mixed_tensor_types_variable_shaped_expected,
):
"""Test mixed tensor types with variable-shaped tensors."""
t1, t2 = mixed_tensor_types_variable_shaped_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == mixed_tensor_types_variable_shaped_expected["length"]
# Verify schema - should have tensor field as variable-shaped
assert t3.schema == mixed_tensor_types_variable_shaped_expected["schema"]
tensor_field = t3.schema.field("tensor")
assert isinstance(tensor_field.type, ArrowVariableShapedTensorType)
# Verify content
result_tensors = t3.column("tensor").to_pylist()
assert len(result_tensors) == mixed_tensor_types_variable_shaped_expected["length"]
expected_tensors = mixed_tensor_types_variable_shaped_expected["tensor_values"]
# Verify each tensor
for i, (result_tensor, expected_tensor) in enumerate(
zip(result_tensors, expected_tensors)
):
assert isinstance(result_tensor, np.ndarray)
assert result_tensor.shape == expected_tensor.shape
assert result_tensor.dtype == expected_tensor.dtype
np.testing.assert_array_equal(result_tensor, expected_tensor)
@pytest.mark.skipif(
not _extension_array_concat_supported(),
reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_mixed_tensor_types_in_struct(
struct_with_mixed_tensor_types_blocks, struct_with_mixed_tensor_types_expected
):
"""Test that the fix works for mixed tensor types in structs."""
t1, t2 = struct_with_mixed_tensor_types_blocks
# This should work with our fix
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == struct_with_mixed_tensor_types_expected["length"]
# Verify the result has the expected structure
assert t3.schema == struct_with_mixed_tensor_types_expected["schema"]
assert "id" in t3.column_names
assert "struct" in t3.column_names
# Verify struct field contains both types of tensors
struct_data = t3.column("struct").to_pylist()
assert len(struct_data) == struct_with_mixed_tensor_types_expected["length"]
expected_struct_values = struct_with_mixed_tensor_types_expected["struct_values"]
# Verify struct values
for i, (struct_row, expected_values) in enumerate(
zip(struct_data, expected_struct_values)
):
for key, expected_value in expected_values.items():
assert struct_row[key] == expected_value
@pytest.mark.skipif(
not _extension_array_concat_supported(),
reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_nested_struct_with_mixed_tensor_types(
nested_struct_with_mixed_tensor_types_blocks,
nested_struct_with_mixed_tensor_types_expected,
):
"""Test nested structs with mixed tensor types at different levels."""
t1, t2 = nested_struct_with_mixed_tensor_types_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == nested_struct_with_mixed_tensor_types_expected["length"]
# Verify the result has the expected structure
assert t3.schema == nested_struct_with_mixed_tensor_types_expected["schema"]
assert "id" in t3.column_names
assert "complex_struct" in t3.column_names
# Verify nested struct field contains both types of tensors
struct_data = t3.column("complex_struct").to_pylist()
assert len(struct_data) == nested_struct_with_mixed_tensor_types_expected["length"]
expected_fields = nested_struct_with_mixed_tensor_types_expected["expected_fields"]
# Check that nested structures are preserved
for field in expected_fields:
if field in ["nested", "outer_tensor", "outer_value"]:
assert field in struct_data[0]
elif field in ["inner_tensor", "inner_value"]:
assert field in struct_data[0]["nested"]
@pytest.mark.skipif(
not _extension_array_concat_supported(),
reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_multiple_tensor_fields_in_struct(
multiple_tensor_fields_struct_blocks, multiple_tensor_fields_struct_expected
):
"""Test structs with multiple tensor fields of different types."""
t1, t2 = multiple_tensor_fields_struct_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == multiple_tensor_fields_struct_expected["length"]
# Verify the result has the expected structure
assert t3.schema == multiple_tensor_fields_struct_expected["schema"]
assert "id" in t3.column_names
assert "multi_tensor_struct" in t3.column_names
# Verify struct field contains both types of tensors
struct_data = t3.column("multi_tensor_struct").to_pylist()
assert len(struct_data) == multiple_tensor_fields_struct_expected["length"]
expected_fields = multiple_tensor_fields_struct_expected["expected_fields"]
# Check that all tensor fields are present
for row in struct_data:
for field in expected_fields:
assert field in row
def test_struct_with_incompatible_tensor_dtypes_fails():
"""Test that concatenating structs with incompatible tensor dtypes fails gracefully."""
# Block 1: Struct with float32 fixed-shape tensor
tensor_data1 = np.ones((2, 2), dtype=np.float32)
# Block 2: Struct with int64 variable-shaped tensor (different dtype)
tensor_data2 = np.array(
[
np.ones((3, 3), dtype=np.int64),
np.zeros((1, 4), dtype=np.int64),
],
dtype=object,
)
t1, t2 = _create_struct_tensor_blocks(
tensor_data1, tensor_data2, "fixed", "variable"
)
# This should fail because of incompatible tensor dtypes
with pytest.raises(
ArrowConversionError,
match=re.escape(
"Can't unify tensor types with divergent scalar types: [ArrowTensorTypeV2(shape=(2,), dtype=float), ArrowVariableShapedTensorType(ndim=2, dtype=int64)]"
),
):
concat([t1, t2])
@pytest.mark.skipif(
not _extension_array_concat_supported(),
reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_struct_with_additional_fields(
struct_with_additional_fields_blocks, struct_with_additional_fields_expected
):
"""Test structs where some blocks have additional fields."""
t1, t2 = struct_with_additional_fields_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == struct_with_additional_fields_expected["length"]
# Verify the result has the expected structure
assert t3.schema == struct_with_additional_fields_expected["schema"]
assert "id" in t3.column_names
assert "struct" in t3.column_names
# Verify struct field contains both types of tensors
struct_data = t3.column("struct").to_pylist()
assert len(struct_data) == struct_with_additional_fields_expected["length"]
field_presence = struct_with_additional_fields_expected["field_presence"]
extra_values = struct_with_additional_fields_expected["extra_values"]
# Check field presence and values
for i, row in enumerate(struct_data):
for field, should_be_present in field_presence.items():
assert (field in row) == should_be_present
# Check extra field values
if "extra" in row:
assert row["extra"] == extra_values[i]
@pytest.mark.skipif(
not _extension_array_concat_supported(),
reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_struct_with_null_tensor_values(
struct_with_null_tensor_values_blocks, struct_with_null_tensor_values_expected
):
"""Test structs where some fields are missing and get filled with nulls."""
t1, t2 = struct_with_null_tensor_values_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == struct_with_null_tensor_values_expected["length"]
# Validate schema - should have both fields
assert t3.schema == struct_with_null_tensor_values_expected["schema"]
# Validate result
assert t3.column("id").to_pylist() == struct_with_null_tensor_values_expected["ids"]
# Check the struct column directly to avoid the Arrow tensor extension null bug
struct_column = t3.column("struct")
expected_values = struct_with_null_tensor_values_expected["values"]
expected_tensor_validity = struct_with_null_tensor_values_expected[
"tensor_validity"
]
# Check each row
for i, (expected_value, expected_valid) in enumerate(
zip(expected_values, expected_tensor_validity)
):
assert struct_column[i]["value"].as_py() == expected_value
if expected_valid:
assert struct_column[i]["tensor"] is not None
else:
# Check that the tensor field is null by checking its validity
tensor_field = struct_column[i]["tensor"]
assert tensor_field.is_valid is False
# Test fixtures for _align_struct_fields tests
@pytest.fixture
def simple_struct_blocks():
"""Fixture for simple struct blocks with missing fields."""
# Block 1: Struct with fields 'a' and 'b'
struct_data1 = [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
# Block 2: Struct with fields 'a' and 'c' (missing 'b', has 'c')
struct_data2 = [{"a": 3, "c": True}, {"a": 4, "c": False}]
return _create_basic_struct_blocks(
struct_data1, struct_data2, id_data1=None, id_data2=None
)
@pytest.fixture
def simple_struct_schema():
"""Fixture for simple struct schema with all fields."""
struct_fields = [("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())]
return _create_struct_schema(struct_fields, include_id=False)
@pytest.fixture
def nested_struct_blocks():
"""Fixture for nested struct blocks with missing fields."""
# Block 1: Nested struct with inner fields 'x' and 'y'
struct_data1 = [{"inner": {"x": 1, "y": "a"}}, {"inner": {"x": 2, "y": "b"}}]
# Block 2: Nested struct with inner fields 'x' and 'z' (missing 'y', has 'z')
struct_data2 = [{"inner": {"x": 3, "z": 1.5}}, {"inner": {"x": 4, "z": 2.5}}]
return _create_basic_struct_blocks(
struct_data1, struct_data2, column_name="outer", id_data1=None, id_data2=None
)
@pytest.fixture
def nested_struct_schema():
"""Fixture for nested struct schema with all fields."""
inner_fields = [("x", pa.int64()), ("y", pa.string()), ("z", pa.float64())]
struct_fields = [("inner", pa.struct(inner_fields))]
return _create_struct_schema(
struct_fields,
include_id=False,
other_fields=[("outer", pa.struct(struct_fields))],
)
@pytest.fixture
def missing_column_blocks():
"""Fixture for blocks where one is missing a struct column entirely."""
# Block 1: Has struct column
t1 = pa.table(
{
"struct": pa.array([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]),
"other": pa.array([10, 20]),
}
)
# Block 2: Missing struct column entirely
t2 = pa.table({"other": pa.array([30, 40])})
return t1, t2
@pytest.fixture
def missing_column_schema():
"""Fixture for schema with struct column that may be missing."""
return pa.schema(
[
("struct", pa.struct([("a", pa.int64()), ("b", pa.string())])),
("other", pa.int64()),
]
)
@pytest.fixture
def multiple_struct_blocks():
"""Fixture for blocks with multiple struct columns."""
# Block 1: Two struct columns with different field sets
struct1_data1 = [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
struct2_data1 = [{"p": 10, "q": True}, {"p": 20, "q": False}]
# Block 2: Same struct columns but with different/missing fields
struct1_data2 = [{"a": 3, "c": 1.5}, {"a": 4, "c": 2.5}] # missing 'b', has 'c'
struct2_data2 = [
{"p": 30, "r": "alpha"},
{"p": 40, "r": "beta"},
] # missing 'q', has 'r'
t1 = pa.table(
{
"struct1": pa.array(struct1_data1),
"struct2": pa.array(struct2_data1),
}
)
t2 = pa.table(
{
"struct1": pa.array(struct1_data2),
"struct2": pa.array(struct2_data2),
}
)
return t1, t2
@pytest.fixture
def multiple_struct_schema():
"""Fixture for schema with multiple struct columns."""
struct1_fields = [("a", pa.int64()), ("b", pa.string()), ("c", pa.float64())]
struct2_fields = [("p", pa.int64()), ("q", pa.bool_()), ("r", pa.string())]
return pa.schema(
[
("struct1", pa.struct(struct1_fields)),
("struct2", pa.struct(struct2_fields)),
]
)
@pytest.fixture
def mixed_column_blocks():
    """Fixture for blocks with mix of struct and non-struct columns.

    The struct column's fields differ between the two blocks while the
    primitive int/string columns match, letting tests verify that only
    struct columns are rewritten during alignment.
    """
    # Block 1: Mix of struct and non-struct columns
    struct_data1 = [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
    int_col1 = [10, 20]
    string_col1 = ["foo", "bar"]
    # Block 2: Same structure
    struct_data2 = [{"a": 3, "c": True}, {"a": 4, "c": False}]  # missing 'b', has 'c'
    int_col2 = [30, 40]
    string_col2 = ["baz", "qux"]
    t1 = pa.table(
        {
            "struct": pa.array(struct_data1),
            "int_col": pa.array(int_col1),
            "string_col": pa.array(string_col1),
        }
    )
    t2 = pa.table(
        {
            "struct": pa.array(struct_data2),
            "int_col": pa.array(int_col2),
            "string_col": pa.array(string_col2),
        }
    )
    return t1, t2
@pytest.fixture
def mixed_column_schema():
    """Schema mixing one struct column with plain int and string columns."""
    struct_type = pa.struct([("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())])
    return pa.schema(
        [
            ("struct", struct_type),
            ("int_col", pa.int64()),
            ("string_col", pa.string()),
        ]
    )
@pytest.fixture
def empty_block_blocks():
    """One zero-row block plus one populated block sharing a struct column."""
    zero_row_type = pa.struct([("a", pa.int64()), ("b", pa.string())])
    empty = pa.table({"struct": pa.array([], type=zero_row_type)})
    # Populated block's rows lack 'b' but carry an extra 'c' field.
    rows = [{"a": 1, "c": True}, {"a": 2, "c": False}]
    populated = pa.table({"struct": pa.array(rows)})
    return empty, populated
@pytest.fixture
def empty_block_schema():
    """Schema (union of all struct fields) used with the empty-block fixture."""
    fields = [("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())]
    return _create_struct_schema(fields, include_id=False)
@pytest.fixture
def already_aligned_blocks():
    """Two blocks whose struct columns already share an identical layout."""
    first_rows = [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
    second_rows = [{"a": 3, "b": "z"}, {"a": 4, "b": "w"}]
    return _create_basic_struct_blocks(
        first_rows, second_rows, id_data1=None, id_data2=None
    )
@pytest.fixture
def already_aligned_schema():
    """Schema matching the already-aligned blocks exactly."""
    return _create_struct_schema(
        [("a", pa.int64()), ("b", pa.string())], include_id=False
    )
@pytest.fixture
def no_struct_blocks():
    """Two blocks containing only primitive (non-struct) columns."""
    first = pa.table(
        {"int_col": pa.array([1, 2]), "string_col": pa.array(["a", "b"])}
    )
    second = pa.table(
        {"int_col": pa.array([3, 4]), "string_col": pa.array(["c", "d"])}
    )
    return first, second
@pytest.fixture
def no_struct_schema():
    """Schema containing only primitive columns (no structs to align)."""
    return pa.schema([("int_col", pa.int64()), ("string_col", pa.string())])
@pytest.fixture
def deep_nesting_blocks():
    """Fixture for blocks with deeply nested structs.

    Both blocks nest three levels deep (level1 -> level2 -> level3); the
    innermost field sets disagree so alignment must recurse.
    """
    # Block 1: Deeply nested struct
    struct_data1 = [
        {"level2": {"level3": {"a": 1, "b": "x"}}},
        {"level2": {"level3": {"a": 2, "b": "y"}}},
    ]
    # Block 2: Same structure but missing some fields
    struct_data2 = [
        {"level2": {"level3": {"a": 3, "c": True}}},  # missing 'b', has 'c'
        {"level2": {"level3": {"a": 4, "c": False}}},
    ]
    return _create_basic_struct_blocks(
        struct_data1, struct_data2, column_name="level1", id_data1=None, id_data2=None
    )
@pytest.fixture
def deep_nesting_schema():
    """Three-level nested struct schema holding the union of leaf fields."""
    leaf = pa.struct([("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())])
    mid = pa.struct([("level3", leaf)])
    return pa.schema([("level1", pa.struct([("level2", mid)]))])
def test_align_struct_fields_simple(simple_struct_blocks, simple_struct_schema):
    """Test basic struct field alignment with missing fields.

    Each block is missing one field of the target schema; alignment must
    add it as null while preserving existing values.
    """
    t1, t2 = simple_struct_blocks
    aligned_blocks = _align_struct_fields([t1, t2], simple_struct_schema)
    assert len(aligned_blocks) == 2
    # Check first block - should have 'c' field filled with None
    result1 = aligned_blocks[0]
    assert result1.schema == simple_struct_schema
    assert result1["struct"].to_pylist() == [
        {"a": 1, "b": "x", "c": None},
        {"a": 2, "b": "y", "c": None},
    ]
    # Check second block - should have 'b' field filled with None
    result2 = aligned_blocks[1]
    assert result2.schema == simple_struct_schema
    assert result2["struct"].to_pylist() == [
        {"a": 3, "b": None, "c": True},
        {"a": 4, "b": None, "c": False},
    ]
def test_align_struct_fields_nested(nested_struct_blocks, nested_struct_schema):
    """Test nested struct field alignment.

    Alignment must recurse into the inner struct and null-fill whichever
    inner field each block is missing.
    """
    t1, t2 = nested_struct_blocks
    aligned_blocks = _align_struct_fields([t1, t2], nested_struct_schema)
    assert len(aligned_blocks) == 2
    # Check first block - should have 'z' field filled with None
    result1 = aligned_blocks[0]
    assert result1.schema == nested_struct_schema
    assert result1["outer"].to_pylist() == [
        {"inner": {"x": 1, "y": "a", "z": None}},
        {"inner": {"x": 2, "y": "b", "z": None}},
    ]
    # Check second block - should have 'y' field filled with None
    result2 = aligned_blocks[1]
    assert result2.schema == nested_struct_schema
    assert result2["outer"].to_pylist() == [
        {"inner": {"x": 3, "y": None, "z": 1.5}},
        {"inner": {"x": 4, "y": None, "z": 2.5}},
    ]
def test_align_struct_fields_missing_column(
    missing_column_blocks, missing_column_schema
):
    """Test alignment when a struct column is missing from some blocks.

    A block that lacks the struct column entirely should get an all-null
    struct column; other columns remain untouched.
    """
    t1, t2 = missing_column_blocks
    aligned_blocks = _align_struct_fields([t1, t2], missing_column_schema)
    assert len(aligned_blocks) == 2
    # Check first block - should be unchanged
    result1 = aligned_blocks[0]
    assert result1.schema == missing_column_schema
    assert result1["struct"].to_pylist() == [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
    assert result1["other"].to_pylist() == [10, 20]
    # Check second block - should have null struct column
    result2 = aligned_blocks[1]
    assert result2.schema == missing_column_schema
    assert result2["struct"].to_pylist() == [None, None]
    assert result2["other"].to_pylist() == [30, 40]
def test_align_struct_fields_multiple_structs(
    multiple_struct_blocks, multiple_struct_schema
):
    """Test alignment with multiple struct columns.

    Both struct columns of each block must independently be aligned to the
    unified schema, null-filling fields the block is missing.
    """
    t1, t2 = multiple_struct_blocks
    aligned_blocks = _align_struct_fields([t1, t2], multiple_struct_schema)
    assert len(aligned_blocks) == 2
    # Check first block
    result1 = aligned_blocks[0]
    assert result1.schema == multiple_struct_schema
    assert result1["struct1"].to_pylist() == [
        {"a": 1, "b": "x", "c": None},
        {"a": 2, "b": "y", "c": None},
    ]
    assert result1["struct2"].to_pylist() == [
        {"p": 10, "q": True, "r": None},
        {"p": 20, "q": False, "r": None},
    ]
    # Check second block
    result2 = aligned_blocks[1]
    assert result2.schema == multiple_struct_schema
    assert result2["struct1"].to_pylist() == [
        {"a": 3, "b": None, "c": 1.5},
        {"a": 4, "b": None, "c": 2.5},
    ]
    assert result2["struct2"].to_pylist() == [
        {"p": 30, "q": None, "r": "alpha"},
        {"p": 40, "q": None, "r": "beta"},
    ]
def test_align_struct_fields_non_struct_columns(
    mixed_column_blocks, mixed_column_schema
):
    """Test that non-struct columns are left unchanged.

    Bug fix: the previous assertion combined ``==`` with a conditional
    expression, which parses as ``(x == A) if i == 0 else B``. For the
    second block that reduced to ``assert ["baz", "qux"]`` — always truthy
    — so block 2's string column was never actually verified. Selecting
    the expected value first makes both blocks' checks real.
    """
    t1, t2 = mixed_column_blocks
    aligned_blocks = _align_struct_fields([t1, t2], mixed_column_schema)
    assert len(aligned_blocks) == 2
    # Check that non-struct columns are unchanged in both blocks.
    expected_strings = [["foo", "bar"], ["baz", "qux"]]
    for i, block in enumerate(aligned_blocks):
        assert block["int_col"].to_pylist() == [10 + i * 20, 20 + i * 20]
        assert block["string_col"].to_pylist() == expected_strings[i]
def test_align_struct_fields_empty_blocks(empty_block_blocks, empty_block_schema):
    """Test alignment with empty blocks.

    A zero-row block must still be cast to the target schema; the
    populated block gets its missing field null-filled as usual.
    """
    t1, t2 = empty_block_blocks
    aligned_blocks = _align_struct_fields([t1, t2], empty_block_schema)
    assert len(aligned_blocks) == 2
    # Check empty block
    result1 = aligned_blocks[0]
    assert result1.schema == empty_block_schema
    assert len(result1) == 0
    # Check non-empty block
    result2 = aligned_blocks[1]
    assert result2.schema == empty_block_schema
    assert result2["struct"].to_pylist() == [
        {"a": 1, "b": None, "c": True},
        {"a": 2, "b": None, "c": False},
    ]
def test_align_struct_fields_already_aligned(
    already_aligned_blocks, already_aligned_schema
):
    """Blocks whose schemas already match the target pass through unchanged."""
    first, second = already_aligned_blocks
    result = _align_struct_fields([first, second], already_aligned_schema)
    # No rewriting should occur for already-aligned inputs.
    assert result == [first, second]
def test_align_struct_fields_no_struct_columns(no_struct_blocks, no_struct_schema):
    """A schema with no struct columns leaves the blocks untouched."""
    first, second = no_struct_blocks
    result = _align_struct_fields([first, second], no_struct_schema)
    assert result == [first, second]
def test_align_struct_fields_deep_nesting(deep_nesting_blocks, deep_nesting_schema):
    """Test alignment with deeply nested structs.

    Alignment must recurse three levels deep and null-fill whichever leaf
    field each block is missing.
    """
    t1, t2 = deep_nesting_blocks
    aligned_blocks = _align_struct_fields([t1, t2], deep_nesting_schema)
    assert len(aligned_blocks) == 2
    # Check first block - should have 'c' field filled with None
    result1 = aligned_blocks[0]
    assert result1.schema == deep_nesting_schema
    assert result1["level1"].to_pylist() == [
        {"level2": {"level3": {"a": 1, "b": "x", "c": None}}},
        {"level2": {"level3": {"a": 2, "b": "y", "c": None}}},
    ]
    # Check second block - should have 'b' field filled with None
    result2 = aligned_blocks[1]
    assert result2.schema == deep_nesting_schema
    assert result2["level1"].to_pylist() == [
        {"level2": {"level3": {"a": 3, "b": None, "c": True}}},
        {"level2": {"level3": {"a": 4, "b": None, "c": False}}},
    ]
# Test fixtures for tensor-related tests
@pytest.fixture
def uniform_tensor_blocks():
    """Two blocks of fixed-shape (2, 2) tensors with distinct value ranges."""
    first = pa.table(
        {"a": ArrowTensorArray.from_numpy(np.arange(12).reshape((3, 2, 2)))}
    )
    second = pa.table(
        {"a": ArrowTensorArray.from_numpy(np.arange(12, 24).reshape((3, 2, 2)))}
    )
    return first, second
@pytest.fixture
def uniform_tensor_expected():
    """Fixture for expected results from uniform tensor concatenation.

    The expected tensor extension type depends on whether the current
    DataContext enables the v2 tensor type.
    """
    if DataContext.get_current().use_arrow_tensor_v2:
        tensor_type = ArrowTensorTypeV2
    else:
        tensor_type = ArrowTensorType
    expected_schema = pa.schema([("a", tensor_type((2, 2), pa.int64()))])
    expected_length = 6
    expected_chunks = 2
    # Expected content
    a1 = np.arange(12).reshape((3, 2, 2))
    a2 = np.arange(12, 24).reshape((3, 2, 2))
    return {
        "schema": expected_schema,
        "length": expected_length,
        "chunks": expected_chunks,
        "content": [a1, a2],
    }
@pytest.fixture
def variable_shaped_tensor_blocks():
    """Two identical blocks of variable-shaped tensors (a 2x2 and a 3x3 each)."""

    def make_block():
        ragged = np.array(
            [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
            dtype=object,
        )
        return pa.table({"a": ArrowTensorArray.from_numpy(ragged)})

    return make_block(), make_block()
@pytest.fixture
def variable_shaped_tensor_expected():
    """Fixture for expected results from variable-shaped tensor concatenation.

    Two blocks of two ragged tensors each concatenate to length 4 under a
    2-D variable-shaped tensor type.
    """
    expected_schema = pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))])
    expected_length = 4
    expected_chunks = 2
    # Expected content
    a1 = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))], dtype=object
    )
    a2 = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))], dtype=object
    )
    return {
        "schema": expected_schema,
        "length": expected_length,
        "chunks": expected_chunks,
        "content": [a1, a2],
    }
@pytest.fixture
def mixed_tensor_blocks():
    """One fixed-shape tensor block followed by one variable-shaped block."""
    fixed = np.arange(12).reshape((3, 2, 2))
    ragged = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
        dtype=object,
    )
    return (
        pa.table({"a": ArrowTensorArray.from_numpy(fixed)}),
        pa.table({"a": ArrowTensorArray.from_numpy(ragged)}),
    )
@pytest.fixture
def mixed_tensor_expected():
    """Fixture for expected results from mixed tensor concatenation.

    Mixing fixed- and variable-shaped tensors should yield the
    variable-shaped extension type; 3 fixed + 2 ragged rows = length 5.
    """
    expected_schema = pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))])
    expected_length = 5
    expected_chunks = 2
    # Expected content
    a1 = np.arange(12).reshape((3, 2, 2))
    a2 = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))], dtype=object
    )
    return {
        "schema": expected_schema,
        "length": expected_length,
        "chunks": expected_chunks,
        "content": [a1, a2],
    }
@pytest.fixture
def different_shape_tensor_blocks():
    """Two fixed-shape tensor blocks whose element shapes differ (2x2 vs 3x3)."""
    small = np.arange(12).reshape((3, 2, 2))
    large = np.arange(12, 39).reshape((3, 3, 3))
    return (
        pa.table({"a": ArrowTensorArray.from_numpy(small)}),
        pa.table({"a": ArrowTensorArray.from_numpy(large)}),
    )
@pytest.fixture
def different_shape_tensor_expected():
    """Fixture for expected results from different shape tensor concatenation.

    Two fixed shapes that disagree must be promoted to the
    variable-shaped extension type.
    """
    expected_schema = pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))])
    expected_length = 6
    expected_chunks = 2
    # Expected content
    a1 = np.arange(12).reshape((3, 2, 2))
    a2 = np.arange(12, 39).reshape((3, 3, 3))
    return {
        "schema": expected_schema,
        "length": expected_length,
        "chunks": expected_chunks,
        "content": [a1, a2],
    }
@pytest.fixture
def mixed_tensor_types_same_dtype_blocks():
    """Fixture for mixed tensor types with same dtype but different shapes.

    Delegates to _create_tensor_blocks: the first block is built as a
    fixed-shape tensor column, the second as variable-shaped.
    """
    # Block 1: Fixed shape tensors with float32
    tensor_data1 = np.ones((2, 2), dtype=np.float32)
    # Block 2: Variable shape tensors with float32
    tensor_data2 = np.array(
        [
            np.ones((3, 3), dtype=np.float32),
            np.zeros((1, 4), dtype=np.float32),
        ],
        dtype=object,
    )
    return _create_tensor_blocks(tensor_data1, tensor_data2, "fixed", "variable")
@pytest.fixture
def mixed_tensor_types_same_dtype_expected():
    """Fixture for expected results from mixed tensor types with same dtype.

    NOTE(review): the (1, 2) expansion below assumes _create_tensor_blocks
    builds the fixed block with 1-D (2,) elements — confirm against that
    helper's definition.
    """
    expected_schema = _create_tensor_schema(struct_name="tensor")
    expected_tensors = [
        # First 2 were converted to var-shaped with their shape expanded
        # with singleton axis: from (2,) to (1, 2)
        np.ones((1, 2), dtype=np.float32),
        np.ones((1, 2), dtype=np.float32),
        # Last 2 were left intact
        np.ones((3, 3), dtype=np.float32),
        np.zeros((1, 4), dtype=np.float32),
    ]
    return _create_expected_result(expected_schema, 4, tensor_values=expected_tensors)
@pytest.fixture
def mixed_tensor_types_fixed_shape_blocks():
    """Two fixed-shape tensor blocks whose shapes disagree (2x2 vs 3x3)."""
    block_a = np.ones((2, 2), dtype=np.float32)
    block_b = np.zeros((3, 3), dtype=np.float32)
    # Second block carries three rows, hence the explicit id column.
    return _create_tensor_blocks(
        block_a, block_b, "fixed", "fixed", id_data2=[3, 4, 5]
    )
@pytest.fixture
def mixed_tensor_types_fixed_shape_expected():
    """Fixture for expected results from mixed tensor types with different fixed shapes.

    Concatenating disagreeing fixed shapes promotes the column to a 1-D
    variable-shaped tensor type.
    """
    expected_schema = _create_tensor_schema(struct_name="tensor", ndim=1)
    expected_tensors = [
        np.ones((2,), dtype=np.float32),  # First 2 converted to variable-shaped
        np.ones((2,), dtype=np.float32),
        np.zeros((3,), dtype=np.float32),  # Last 3 variable-shaped
        np.zeros((3,), dtype=np.float32),
        np.zeros((3,), dtype=np.float32),
    ]
    return _create_expected_result(expected_schema, 5, tensor_values=expected_tensors)
@pytest.fixture
def mixed_tensor_types_variable_shaped_blocks():
    """Fixture for mixed tensor types with variable-shaped tensors.

    Both blocks are variable-shaped; each row has a different element
    shape, so concatenation should preserve every tensor as-is.
    """
    # Block 1: Variable shape tensors
    tensor_data1 = np.array(
        [
            np.ones((2, 2), dtype=np.float32),
            np.zeros((3, 3), dtype=np.float32),
        ],
        dtype=object,
    )
    # Block 2: Variable shape tensors with different shapes
    tensor_data2 = np.array(
        [
            np.ones((1, 4), dtype=np.float32),
            np.zeros((2, 1), dtype=np.float32),
        ],
        dtype=object,
    )
    return _create_tensor_blocks(tensor_data1, tensor_data2, "variable", "variable")
@pytest.fixture
def mixed_tensor_types_variable_shaped_expected():
    """Fixture for expected results from mixed variable-shaped tensor types.

    All four ragged tensors pass through unchanged, in block order.
    """
    expected_schema = _create_tensor_schema(struct_name="tensor")
    expected_tensors = [
        np.ones((2, 2), dtype=np.float32),
        np.zeros((3, 3), dtype=np.float32),
        np.ones((1, 4), dtype=np.float32),
        np.zeros((2, 1), dtype=np.float32),
    ]
    return _create_expected_result(expected_schema, 4, tensor_values=expected_tensors)
@pytest.fixture
def struct_with_mixed_tensor_types_blocks():
    """Fixture for struct blocks with mixed tensor types.

    Delegates to _create_struct_tensor_blocks: the tensor lives inside a
    struct column, fixed-shape in block 1 and variable-shaped in block 2.
    """
    # Block 1: Struct with fixed-shape tensor
    tensor_data1 = np.ones((2, 2), dtype=np.float32)
    # Block 2: Struct with variable-shaped tensor
    tensor_data2 = np.array(
        [
            np.ones((3, 3), dtype=np.float32),
            np.zeros((1, 4), dtype=np.float32),
        ],
        dtype=object,
    )
    return _create_struct_tensor_blocks(tensor_data1, tensor_data2, "fixed", "variable")
@pytest.fixture
def struct_with_mixed_tensor_types_expected():
    """Fixture for expected results from struct with mixed tensor types.

    Only the struct's scalar "value" field is asserted here; tensor
    contents are covered by the dedicated tensor fixtures.
    """
    expected_schema = _create_tensor_schema(struct_name="struct")
    expected_struct_values = [
        {"value": 1},  # First two from fixed-shape tensor struct
        {"value": 2},
        {"value": 3},  # Last two from variable-shaped tensor struct
        {"value": 4},
    ]
    return _create_expected_result(
        expected_schema, 4, struct_values=expected_struct_values
    )
@pytest.fixture
def nested_struct_with_mixed_tensor_types_blocks():
    """Fixture for nested struct blocks with mixed tensor types.

    Builds two tables whose "complex_struct" column nests another struct
    ("nested") and carries tensors at both nesting levels: fixed-shape in
    block 1, variable-shaped in block 2.
    """
    # Block 1: Nested struct with fixed-shape tensors
    tensor_data1 = np.ones((2, 2), dtype=np.float32)
    tensor_array1 = _create_tensor_array(tensor_data1, "fixed")
    inner_struct1 = pa.StructArray.from_arrays(
        [tensor_array1, pa.array([10, 20], type=pa.int64())],
        names=["inner_tensor", "inner_value"],
    )
    outer_tensor1 = _create_tensor_array(np.zeros((2, 1), dtype=np.float32), "fixed")
    outer_struct1 = pa.StructArray.from_arrays(
        [inner_struct1, outer_tensor1, pa.array([1, 2], type=pa.int64())],
        names=["nested", "outer_tensor", "outer_value"],
    )
    t1 = pa.table({"id": [1, 2], "complex_struct": outer_struct1})
    # Block 2: Nested struct with variable-shaped tensors
    tensor_data2 = np.array(
        [
            np.ones((3, 3), dtype=np.float32),
            np.zeros((1, 4), dtype=np.float32),
        ],
        dtype=object,
    )
    tensor_array2 = _create_tensor_array(tensor_data2, "variable")
    inner_struct2 = pa.StructArray.from_arrays(
        [tensor_array2, pa.array([30, 40], type=pa.int64())],
        names=["inner_tensor", "inner_value"],
    )
    outer_tensor2 = _create_tensor_array(
        np.array(
            [np.ones((2, 2), dtype=np.float32), np.zeros((1, 3), dtype=np.float32)],
            dtype=object,
        ),
        "variable",
    )
    outer_struct2 = pa.StructArray.from_arrays(
        [inner_struct2, outer_tensor2, pa.array([3, 4], type=pa.int64())],
        names=["nested", "outer_tensor", "outer_value"],
    )
    t2 = pa.table({"id": [3, 4], "complex_struct": outer_struct2})
    return t1, t2
@pytest.fixture
def nested_struct_with_mixed_tensor_types_expected():
    """Fixture for expected results from nested struct with mixed tensor types.

    Both tensor fields (inner and outer) are expected to be promoted to
    the variable-shaped extension type after concatenation.
    """
    expected_schema = pa.schema(
        [
            ("id", pa.int64()),
            (
                "complex_struct",
                pa.struct(
                    [
                        (
                            "nested",
                            pa.struct(
                                [
                                    (
                                        "inner_tensor",
                                        ArrowVariableShapedTensorType(pa.float32(), 2),
                                    ),
                                    ("inner_value", pa.int64()),
                                ]
                            ),
                        ),
                        (
                            "outer_tensor",
                            ArrowVariableShapedTensorType(pa.float32(), 2),
                        ),
                        ("outer_value", pa.int64()),
                    ]
                ),
            ),
        ]
    )
    expected_fields = [
        "nested",
        "outer_tensor",
        "outer_value",
        "inner_tensor",
        "inner_value",
    ]
    return _create_expected_result(expected_schema, 4, expected_fields=expected_fields)
@pytest.fixture
def multiple_tensor_fields_struct_blocks():
    """Fixture for struct blocks with multiple tensor fields.

    Each block's struct carries two tensor fields with different dtypes
    (float32 and int32) plus a scalar "value" field; block 1 uses
    fixed-shape tensors and block 2 variable-shaped ones.
    """
    # Block 1: Struct with multiple fixed-shape tensors
    tensor1_data = np.ones((2, 2), dtype=np.float32)
    tensor1_array = _create_tensor_array(tensor1_data, "fixed")
    tensor2_data = np.zeros((2, 3), dtype=np.int32)
    tensor2_array = _create_tensor_array(tensor2_data, "fixed")
    struct_array1 = pa.StructArray.from_arrays(
        [tensor1_array, tensor2_array, pa.array([1, 2], type=pa.int64())],
        names=["tensor1", "tensor2", "value"],
    )
    t1 = pa.table({"id": [1, 2], "multi_tensor_struct": struct_array1})
    # Block 2: Struct with multiple variable-shaped tensors
    tensor1_data2 = np.array(
        [
            np.ones((3, 3), dtype=np.float32),
            np.zeros((1, 4), dtype=np.float32),
        ],
        dtype=object,
    )
    tensor1_array2 = _create_tensor_array(tensor1_data2, "variable")
    tensor2_data2 = np.array(
        [
            np.ones((2, 2), dtype=np.int32),
            np.zeros((3, 1), dtype=np.int32),
        ],
        dtype=object,
    )
    tensor2_array2 = _create_tensor_array(tensor2_data2, "variable")
    struct_array2 = pa.StructArray.from_arrays(
        [tensor1_array2, tensor2_array2, pa.array([3, 4], type=pa.int64())],
        names=["tensor1", "tensor2", "value"],
    )
    t2 = pa.table({"id": [3, 4], "multi_tensor_struct": struct_array2})
    return t1, t2
@pytest.fixture
def multiple_tensor_fields_struct_expected():
    """Fixture for expected results from struct with multiple tensor fields.

    Both tensor fields are promoted to variable-shaped types, keeping
    their respective element dtypes (float32 / int32).
    """
    expected_schema = pa.schema(
        [
            ("id", pa.int64()),
            (
                "multi_tensor_struct",
                pa.struct(
                    [
                        ("tensor1", ArrowVariableShapedTensorType(pa.float32(), 2)),
                        ("tensor2", ArrowVariableShapedTensorType(pa.int32(), 2)),
                        ("value", pa.int64()),
                    ]
                ),
            ),
        ]
    )
    expected_fields = ["tensor1", "tensor2", "value"]
    return _create_expected_result(expected_schema, 4, expected_fields=expected_fields)
@pytest.fixture
def struct_with_additional_fields_blocks():
    """Fixture for struct blocks where some have additional fields.

    Block 2 carries an extra string field (via ``extra_data2``) that block
    1 lacks, so concatenation must null-fill it for block 1's rows.
    """
    # Block 1: Struct with tensor field and basic fields
    tensor_data1 = np.ones((2, 2), dtype=np.float32)
    # Block 2: Struct with tensor field and additional fields
    tensor_data2 = np.array(
        [
            np.ones((3, 3), dtype=np.float32),
            np.zeros((1, 4), dtype=np.float32),
        ],
        dtype=object,
    )
    return _create_struct_tensor_blocks(
        tensor_data1, tensor_data2, "fixed", "variable", extra_data2=["a", "b"]
    )
@pytest.fixture
def struct_with_additional_fields_expected():
    """Fixture for expected results from struct with additional fields.

    The "extra" field is null for block 1's two rows and populated for
    block 2's.
    """
    expected_schema = _create_tensor_schema(struct_name="struct", include_extra=True)
    expected_field_presence = {"tensor": True, "value": True, "extra": True}
    expected_extra_values = [None, None, "a", "b"]
    return _create_expected_result(
        expected_schema,
        4,
        field_presence=expected_field_presence,
        extra_values=expected_extra_values,
    )
@pytest.fixture
def struct_with_null_tensor_values_blocks():
    """Fixture for struct blocks where some fields are missing and get filled with nulls.

    Block 2's struct has only the "value" field, so the "tensor" field
    must be null-filled for its single row during concatenation.
    """
    # Block 1: Struct with tensor and value fields
    tensor_data1 = np.ones((2, 2), dtype=np.float32)
    tensor_array1 = ArrowTensorArray.from_numpy(tensor_data1)
    value_array1 = pa.array([1, 2], type=pa.int64())
    struct_array1 = pa.StructArray.from_arrays(
        [tensor_array1, value_array1], names=["tensor", "value"]
    )
    t1 = pa.table({"id": [1, 2], "struct": struct_array1})
    # Block 2: Struct with only value field (missing tensor field)
    value_array2 = pa.array([3], type=pa.int64())
    struct_array2 = pa.StructArray.from_arrays([value_array2], names=["value"])
    t2 = pa.table({"id": [3], "struct": struct_array2})
    return t1, t2
@pytest.fixture
def struct_with_null_tensor_values_expected():
    """Fixture for expected results from struct with null tensor values.

    NOTE(review): this schema hard-codes ArrowTensorTypeV2, unlike
    uniform_tensor_expected which branches on
    DataContext.use_arrow_tensor_v2 — confirm the consuming test always
    runs with the v2 tensor type enabled.
    """
    expected_schema = pa.schema(
        [
            ("id", pa.int64()),
            (
                "struct",
                pa.struct(
                    [
                        ("tensor", ArrowTensorTypeV2((2,), pa.float32())),
                        ("value", pa.int64()),
                    ]
                ),
            ),
        ]
    )
    expected_length = 3
    expected_ids = [1, 2, 3]
    # Expected value field values
    expected_values = [1, 2, 3]
    # Expected tensor field validity (row from block 2 has a null tensor)
    expected_tensor_validity = [True, True, False]
    return {
        "schema": expected_schema,
        "length": expected_length,
        "ids": expected_ids,
        "values": expected_values,
        "tensor_validity": expected_tensor_validity,
    }
@pytest.fixture
def basic_concat_blocks():
    """Two small two-column tables for the basic concat test."""
    return [
        pa.table({"a": [1, 2], "b": [5, 6]}),
        pa.table({"a": [3, 4], "b": [7, 8]}),
    ]
@pytest.fixture
def basic_concat_expected():
    """Expected length, schema, chunking, and contents after basic concat."""
    expected = {"length": 4, "chunks": 2}
    expected["column_names"] = ["a", "b"]
    expected["schema_types"] = [pa.int64()] * 2
    expected["content"] = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
    return expected
@pytest.fixture
def null_promotion_blocks():
    """Two tables where each has an all-null column the other populates."""
    return [
        pa.table({"a": [None, None], "b": [5, 6]}),
        pa.table({"a": [3, 4], "b": [None, None]}),
    ]
@pytest.fixture
def null_promotion_expected():
    """Expected results once all-null columns are promoted to int64."""
    expected = {"length": 4, "chunks": 2}
    expected["column_names"] = ["a", "b"]
    expected["schema_types"] = [pa.int64()] * 2
    expected["content"] = {"a": [None, None, 3, 4], "b": [5, 6, None, None]}
    return expected
@pytest.fixture
def struct_different_field_names_blocks():
    """Fixture for struct with different field names test data.

    The two blocks share field "x" but disagree on the second field
    ("y" vs "z"), with explicit struct types supplied per block.
    """
    struct_data1 = [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}]
    struct_data2 = [{"x": 3, "z": "c"}]
    struct_type1 = pa.struct([("x", pa.int32()), ("y", pa.string())])
    struct_type2 = pa.struct([("x", pa.int32()), ("z", pa.string())])
    additional_columns1 = {"a": [1, 2]}
    additional_columns2 = {"a": [3]}
    return _create_struct_blocks_with_columns(
        struct_data1,
        struct_data2,
        struct_type1,
        struct_type2,
        additional_columns1,
        additional_columns2,
    )
@pytest.fixture
def struct_different_field_names_expected():
    """Fixture for struct with different field names expected results.

    The unified struct carries x/y/z; each row's absent field is null.
    """
    field_names = ["x", "y", "z"]
    field_types = [pa.int32(), pa.string(), pa.string()]
    additional_fields = [("a", pa.int64())]
    schema = _create_simple_struct_schema(field_names, field_types, additional_fields)
    content = {
        "a": [1, 2, 3],
        "d": [
            {"x": 1, "y": "a", "z": None},
            {"x": 2, "y": "b", "z": None},
            {"x": 3, "y": None, "z": "c"},
        ],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def nested_structs_blocks():
    """Fixture for nested structs test data.

    Two single-row tables whose "d" column nests structs two levels deep
    ("x" -> "y"/"z"); the leaf field names are disjoint between blocks
    (p/m vs q/n) so unification must merge them.
    """
    t1 = pa.table(
        {
            "a": [1],
            "d": pa.array(
                [
                    {
                        "x": {
                            "y": {"p": 1},  # Missing "q"
                            "z": {"m": 3},  # Missing "n"
                        },
                        "w": 5,
                    }
                ],
                type=pa.struct(
                    [
                        (
                            "x",
                            pa.struct(
                                [
                                    (
                                        "y",
                                        pa.struct([("p", pa.int32())]),  # Only "p"
                                    ),
                                    (
                                        "z",
                                        pa.struct([("m", pa.int32())]),  # Only "m"
                                    ),
                                ]
                            ),
                        ),
                        ("w", pa.int32()),
                    ]
                ),
            ),
        }
    )
    t2 = pa.table(
        {
            "a": [2],
            "d": pa.array(
                [
                    {
                        "x": {
                            "y": {"q": 7},  # Missing "p"
                            "z": {"n": 9},  # Missing "m"
                        },
                        "w": 10,
                    }
                ],
                type=pa.struct(
                    [
                        (
                            "x",
                            pa.struct(
                                [
                                    (
                                        "y",
                                        pa.struct([("q", pa.int32())]),  # Only "q"
                                    ),
                                    (
                                        "z",
                                        pa.struct([("n", pa.int32())]),  # Only "n"
                                    ),
                                ]
                            ),
                        ),
                        ("w", pa.int32()),
                    ]
                ),
            ),
        }
    )
    return [t1, t2]
@pytest.fixture
def nested_structs_expected():
    """Fixture for nested structs expected results.

    The unified schema carries the union of leaf fields (p/q and m/n);
    each row's originally-absent leaves are null.
    """
    return {
        "length": 2,
        "schema": pa.schema(
            [
                ("a", pa.int64()),
                (
                    "d",
                    pa.struct(
                        [
                            (
                                "x",
                                pa.struct(
                                    [
                                        (
                                            "y",
                                            pa.struct(
                                                [("p", pa.int32()), ("q", pa.int32())]
                                            ),
                                        ),
                                        (
                                            "z",
                                            pa.struct(
                                                [("m", pa.int32()), ("n", pa.int32())]
                                            ),
                                        ),
                                    ]
                                ),
                            ),
                            ("w", pa.int32()),
                        ]
                    ),
                ),
            ]
        ),
        "content": {
            "a": [1, 2],
            "d": [
                {
                    "x": {
                        "y": {"p": 1, "q": None},  # Missing "q" filled with None
                        "z": {"m": 3, "n": None},  # Missing "n" filled with None
                    },
                    "w": 5,
                },
                {
                    "x": {
                        "y": {"p": None, "q": 7},  # Missing "p" filled with None
                        "z": {"m": None, "n": 9},  # Missing "m" filled with None
                    },
                    "w": 10,
                },
            ],
        },
    }
@pytest.fixture
def struct_null_values_blocks():
    """Fixture for struct with null values test data.

    Row 2 of block 1 and the single row of block 2 are whole-struct nulls
    (not structs of null fields).
    """
    struct_data1 = [{"x": 1, "y": "a"}, None]  # Second row is null
    struct_data2 = [None]  # Entire struct is null
    field_names = ["x", "y"]
    field_types = [pa.int32(), pa.string()]
    additional_columns1 = {"a": [1, 2]}
    additional_columns2 = {"a": [3]}
    return _create_simple_struct_blocks(
        struct_data1,
        struct_data2,
        field_names,
        field_types,
        additional_columns1,
        additional_columns2,
    )
@pytest.fixture
def struct_null_values_expected():
    """Fixture for struct with null values expected results.

    Null struct rows stay None after concatenation rather than becoming
    structs whose every field is None.
    """
    field_names = ["x", "y"]
    field_types = [pa.int32(), pa.string()]
    additional_fields = [("a", pa.int64())]
    schema = _create_simple_struct_schema(field_names, field_types, additional_fields)
    content = {
        "a": [1, 2, 3],
        "d": [
            {"x": 1, "y": "a"},
            None,  # Entire struct is None, not {"x": None, "y": None}
            None,  # Entire struct is None, not {"x": None, "y": None}
        ],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def struct_mismatched_lengths_blocks():
    """Fixture for struct with mismatched lengths test data.

    Block 1 has two rows and block 2 has one; both share the same struct
    layout.
    """
    struct_data1 = [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}]
    struct_data2 = [{"x": 3, "y": "c"}]
    field_names = ["x", "y"]
    field_types = [pa.int32(), pa.string()]
    additional_columns1 = {"a": [1, 2]}
    additional_columns2 = {"a": [3]}
    return _create_simple_struct_blocks(
        struct_data1,
        struct_data2,
        field_names,
        field_types,
        additional_columns1,
        additional_columns2,
    )
@pytest.fixture
def struct_mismatched_lengths_expected():
    """Fixture for struct with mismatched lengths expected results.

    Concatenation simply appends the rows (2 + 1 = 3) with no nulls.
    """
    field_names = ["x", "y"]
    field_types = [pa.int32(), pa.string()]
    additional_fields = [("a", pa.int64())]
    schema = _create_simple_struct_schema(field_names, field_types, additional_fields)
    content = {
        "a": [1, 2, 3],
        "d": [
            {"x": 1, "y": "a"},
            {"x": 2, "y": "b"},
            {"x": 3, "y": "c"},
        ],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def struct_empty_arrays_blocks():
    """Fixture for struct with empty arrays test data.

    Block 2's single struct row is masked null even though its field
    arrays exist (all-None field arrays plus a validity mask).
    """
    struct_data1 = [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}]
    # Define the second table with null struct value (empty arrays for fields)
    x_array = pa.array([None], type=pa.int32())
    y_array = pa.array([None], type=pa.string())
    # Create a struct array from null field arrays
    null_struct_array = pa.StructArray.from_arrays(
        [x_array, y_array],
        ["x", "y"],
        mask=pa.array([True]),
    )
    t1 = pa.table(
        {
            "a": [1, 2],
            "d": pa.array(
                struct_data1, type=pa.struct([("x", pa.int32()), ("y", pa.string())])
            ),
        }
    )
    t2 = pa.table({"a": [3], "d": null_struct_array})
    return [t1, t2]
@pytest.fixture
def struct_empty_arrays_expected():
    """Fixture for struct with empty arrays expected results.

    The masked struct row surfaces as a plain None after concatenation.
    """
    field_names = ["x", "y"]
    field_types = [pa.int32(), pa.string()]
    additional_fields = [("a", pa.int64())]
    schema = _create_simple_struct_schema(field_names, field_types, additional_fields)
    content = {
        "a": [1, 2, 3],
        "d": [
            {"x": 1, "y": "a"},
            {"x": 2, "y": "b"},
            None,  # Entire struct is None, as PyArrow handles it
        ],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def unify_schemas_basic_schemas():
    """Fixture for basic unify schemas test data.

    Provides single-column schemas covering fixed-shape tensors of two
    different shapes plus variable-shaped tensors of 1, 2, and 3
    dimensions, keyed by descriptive names.
    """
    tensor_arr_1 = pa.schema([("tensor_arr", ArrowTensorType((3, 5), pa.int32()))])
    tensor_arr_2 = pa.schema([("tensor_arr", ArrowTensorType((2, 1), pa.int32()))])
    tensor_arr_3 = pa.schema([("tensor_arr", ArrowTensorType((3, 5), pa.int32()))])
    var_tensor_arr = pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 2)),
        ]
    )
    var_tensor_arr_1d = pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 1)),
        ]
    )
    var_tensor_arr_3d = pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 3)),
        ]
    )
    return {
        "tensor_arr_1": tensor_arr_1,
        "tensor_arr_2": tensor_arr_2,
        "tensor_arr_3": tensor_arr_3,
        "var_tensor_arr": var_tensor_arr,
        "var_tensor_arr_1d": var_tensor_arr_1d,
        "var_tensor_arr_3d": var_tensor_arr_3d,
    }
@pytest.fixture
def unify_schemas_multicol_schemas():
    """Fixture for multi-column unify schemas test data.

    Three schemas over the same three column names, mixing fixed-shape
    and variable-shaped tensor types so unification decisions can be
    exercised per column.
    """
    multicol_schema_1 = pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowTensorType((4, 2), pa.int32())),
            ("col_var_tensor", ArrowVariableShapedTensorType(pa.int16(), 5)),
        ]
    )
    multicol_schema_2 = pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowTensorType((4, 2), pa.int32())),
            ("col_var_tensor", ArrowTensorType((9, 4, 1, 0, 5), pa.int16())),
        ]
    )
    multicol_schema_3 = pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowVariableShapedTensorType(pa.int32(), 3)),
            ("col_var_tensor", ArrowVariableShapedTensorType(pa.int16(), 5)),
        ]
    )
    return {
        "multicol_schema_1": multicol_schema_1,
        "multicol_schema_2": multicol_schema_2,
        "multicol_schema_3": multicol_schema_3,
    }
@pytest.fixture
def object_concat_blocks():
    """An integer table plus a table carrying arbitrary Python objects."""
    payload = types.SimpleNamespace(a=1, b="test")
    plain = pa.table({"a": [3, 4], "b": [7, 8]})
    objects = pa.table(
        {"a": ArrowPythonObjectArray.from_objects([payload, payload]), "b": [0, 1]}
    )
    return [plain, objects]
@pytest.fixture
def object_concat_expected():
    """Fixture for object concat expected results.

    A fresh SimpleNamespace compares equal to the one stored in the
    blocks (SimpleNamespace defines attribute-wise equality).
    """
    obj = types.SimpleNamespace(a=1, b="test")
    return {
        "length": 4,
        "a_type": ArrowPythonObjectType,
        "b_type": pa.types.is_integer,
        "content": {"a": [3, 4, obj, obj], "b": [7, 8, 0, 1]},
    }
@pytest.fixture
def struct_variable_shaped_tensor_blocks():
    """Two tables whose struct column embeds a variable-shaped tensor field."""

    def _make_table(ids, labels, tensors):
        # Wrap per-row float32 ndarrays of differing shapes into a
        # variable-shaped tensor array, then pair each with a string label
        # inside a struct column.
        tensor_arr = ArrowVariableShapedTensorArray.from_numpy(
            np.array(tensors, dtype=object)
        )
        struct_arr = pa.StructArray.from_arrays(
            [pa.array(labels), tensor_arr], names=["metadata", "tensor"]
        )
        return pa.table({"id": ids, "struct_with_tensor": struct_arr})

    first = _make_table(
        [1, 2],
        ["row1", "row2"],
        [np.ones((2, 2), dtype=np.float32), np.zeros((3, 3), dtype=np.float32)],
    )
    second = _make_table(
        [3, 4],
        ["row3", "row4"],
        [np.ones((1, 4), dtype=np.float32), np.zeros((2, 1), dtype=np.float32)],
    )
    return [first, second]
@pytest.fixture
def struct_variable_shaped_tensor_expected():
    """Expected length/schema after concatenating the struct-tensor blocks."""
    struct_type = pa.struct(
        [
            ("metadata", pa.string()),
            ("tensor", ArrowVariableShapedTensorType(pa.float32(), 2)),
        ]
    )
    return {
        "length": 4,
        "schema": pa.schema(
            [("id", pa.int64()), ("struct_with_tensor", struct_type)]
        ),
        "content": {"id": [1, 2, 3, 4]},
    }
@pytest.fixture
def unify_schemas_object_types_schemas():
    """Schemas where an object-typed column meets numeric columns on unify."""
    from ray.air.util.object_extensions.arrow import ArrowPythonObjectType

    return {
        "object_schema": pa.schema([("obj_col", ArrowPythonObjectType())]),
        "int_schema": pa.schema([("obj_col", pa.int32())]),
        "float_schema": pa.schema([("obj_col", pa.float64())]),
        # Unification should promote the column to the object extension type.
        "expected": pa.schema([("obj_col", ArrowPythonObjectType())]),
    }
@pytest.fixture
def unify_schemas_incompatible_tensor_schemas():
    """Two tensor schemas that share a (2, 2) shape but disagree on dtype."""

    def _tensor_schema(dtype):
        return pa.schema([("tensor", ArrowTensorType((2, 2), dtype))])

    return [_tensor_schema(pa.int32()), _tensor_schema(pa.float32())]
@pytest.fixture
def unify_schemas_objects_and_tensors_schemas():
    """An object-typed and a tensor-typed schema for the same column name."""
    from ray.air.util.object_extensions.arrow import ArrowPythonObjectType

    object_schema = pa.schema([("col", ArrowPythonObjectType())])
    tensor_schema = pa.schema([("col", ArrowTensorType((2, 2), pa.int32()))])
    return [object_schema, tensor_schema]
@pytest.fixture
def unify_schemas_missing_tensor_fields_schemas():
    """Struct schemas where one side lacks the tensor field entirely."""
    # The full struct carries a tensor plus a plain value field; unifying it
    # with the tensor-less variant should recover this full struct.
    full_struct = pa.struct(
        [
            ("tensor", ArrowTensorType((2, 2), pa.int32())),
            ("value", pa.int64()),
        ]
    )
    schema_with_tensor = pa.schema([("struct", full_struct)])
    schema_without_tensor = pa.schema(
        [("struct", pa.struct([("value", pa.int64())]))]
    )
    expected = pa.schema([("struct", full_struct)])
    return {
        "with_tensor": schema_with_tensor,
        "without_tensor": schema_without_tensor,
        "expected": expected,
    }
@pytest.fixture
def unify_schemas_nested_struct_tensors_schemas():
    """Nested struct schemas where a deeply nested tensor field is missing."""

    def _outer_schema(inner_fields):
        # Every schema shares the same outer layout: a struct with an "inner"
        # struct plus an "id" column; only the inner fields differ.
        return pa.schema(
            [
                (
                    "outer",
                    pa.struct(
                        [
                            ("inner", pa.struct(inner_fields)),
                            ("id", pa.int64()),
                        ]
                    ),
                )
            ]
        )

    schema_with_tensor = _outer_schema(
        [
            ("tensor", ArrowTensorType((3, 3), pa.float32())),
            ("data", pa.string()),
        ]
    )
    # The second schema drops the nested tensor field entirely.
    schema_without_tensor = _outer_schema([("data", pa.string())])
    # Unification should restore the tensor field inside the nested struct.
    expected = _outer_schema(
        [
            ("tensor", ArrowTensorType((3, 3), pa.float32())),
            ("data", pa.string()),
        ]
    )
    return {
        "with_tensor": schema_with_tensor,
        "without_tensor": schema_without_tensor,
        "expected": expected,
    }
@pytest.mark.parametrize("use_arrow_tensor_v2", [True, False])
@pytest.mark.skipif(
    get_pyarrow_version() < MIN_PYARROW_VERSION_TYPE_PROMOTION,
    reason="Requires Arrow version of at least 14.0.0",
)
def test_concat_with_mixed_tensor_types_and_native_pyarrow_types(
    use_arrow_tensor_v2, restore_data_context
):
    """Concatenating blocks whose native "int" columns need type promotion
    (uint64 vs float64-with-NaN) must not raise, must promote that column to
    float64, and must leave the tensor extension column's type untouched —
    for both the v1 and v2 tensor extension types."""
    DataContext.get_current().use_arrow_tensor_v2 = use_arrow_tensor_v2
    num_rows = 1024
    # Block A: int is uint64; tensor = Ray tensor extension
    t_uint = pa.table(
        {
            "int": pa.array(np.zeros(num_rows // 2, dtype=np.uint64), type=pa.uint64()),
            "tensor": ArrowTensorArray.from_numpy(
                np.zeros((num_rows // 2, 3, 3), dtype=np.float32)
            ),
        }
    )
    # Block B: int is float64 with NaNs; tensor = same extension type
    f = np.ones(num_rows // 2, dtype=np.float64)
    f[::8] = np.nan
    t_float = pa.table(
        {
            "int": pa.array(f, type=pa.float64()),
            "tensor": ArrowTensorArray.from_numpy(
                np.zeros((num_rows // 2, 3, 3), dtype=np.float32)
            ),
        }
    )
    # Two input blocks with different Arrow dtypes for "int"
    ds = ray.data.from_arrow([t_uint, t_float])
    # Force a concat across blocks
    ds = ds.repartition(1)
    # This should not raise: RuntimeError: Types mismatch: double != uint64
    ds.materialize()
    # Ensure that the result is correct
    # Determine expected tensor type based on current DataContext setting
    if use_arrow_tensor_v2:
        expected_tensor_type = ArrowTensorTypeV2((3, 3), pa.float32())
    else:
        expected_tensor_type = ArrowTensorType((3, 3), pa.float32())
    assert ds.schema().base_schema == pa.schema(
        [("int", pa.float64()), ("tensor", expected_tensor_type)]
    )
    assert ds.count() == num_rows
@pytest.fixture
def object_with_tensor_fails_blocks():
    """Blocks whose concat should fail: object column vs tensor column."""
    payload = types.SimpleNamespace(a=1, b="test")
    object_table = pa.table(
        {"a": ArrowPythonObjectArray.from_objects([payload, payload])}
    )
    # The second table stores column "a" as a tensor extension array instead.
    tensor_table = pa.table(
        {"a": ArrowTensorArray.from_numpy(np.array([[1, 2], [3, 4]]))}
    )
    return [object_table, tensor_table]
@pytest.fixture
def simple_concat_data():
    """Minimal concat inputs: an empty block list and a single small block."""
    return {
        "empty": [],
        "single_block": pa.table({"a": [1, 2]}),
    }
# Helper function for creating tensor arrays
def _create_tensor_array(data, tensor_type="fixed"):
"""Helper function to create tensor arrays with consistent patterns."""
if tensor_type == "fixed":
return ArrowTensorArray.from_numpy(data)
elif tensor_type == "variable":
return ArrowVariableShapedTensorArray.from_numpy(data)
else:
raise ValueError(f"Unknown tensor type: {tensor_type}")
# Helper function for creating expected results
def _create_expected_result(schema, length, **kwargs):
"""Helper function to create expected result dictionaries."""
result = {"schema": schema, "length": length}
result.update(kwargs)
return result
# Helper: build a pair of (id, tensor) tables.
def _create_tensor_blocks(
    tensor_data1,
    tensor_data2,
    tensor_type1="fixed",
    tensor_type2="variable",
    id_data1=None,
    id_data2=None,
    column_name="tensor",
):
    """Build two (id, tensor) tables; ids default to [1, 2] and [3, 4]."""
    first_ids = [1, 2] if id_data1 is None else id_data1
    second_ids = [3, 4] if id_data2 is None else id_data2
    first = pa.table(
        {
            "id": first_ids,
            column_name: _create_tensor_array(tensor_data1, tensor_type1),
        }
    )
    second = pa.table(
        {
            "id": second_ids,
            column_name: _create_tensor_array(tensor_data2, tensor_type2),
        }
    )
    return first, second
# Helper: build a pair of tables whose struct column wraps tensor/value
# fields (the second table optionally gains an extra string field).
def _create_struct_tensor_blocks(
    tensor_data1,
    tensor_data2,
    tensor_type1="fixed",
    tensor_type2="variable",
    value_data1=None,
    value_data2=None,
    extra_data2=None,
    struct_name="struct",
    id_data1=None,
    id_data2=None,
):
    """Build two tables with struct columns holding tensor + value fields.

    When extra_data2 is given, only the second table's struct also carries an
    "extra" string field.
    """
    value_data1 = [1, 2] if value_data1 is None else value_data1
    value_data2 = [3, 4] if value_data2 is None else value_data2
    id_data1 = [1, 2] if id_data1 is None else id_data1
    id_data2 = [3, 4] if id_data2 is None else id_data2

    struct_array1 = pa.StructArray.from_arrays(
        [
            _create_tensor_array(tensor_data1, tensor_type1),
            pa.array(value_data1, type=pa.int64()),
        ],
        names=["tensor", "value"],
    )

    second_children = [
        _create_tensor_array(tensor_data2, tensor_type2),
        pa.array(value_data2, type=pa.int64()),
    ]
    second_names = ["tensor", "value"]
    if extra_data2 is not None:
        # Only the second struct gains the optional string field.
        second_children.append(pa.array(extra_data2, type=pa.string()))
        second_names.append("extra")
    struct_array2 = pa.StructArray.from_arrays(second_children, names=second_names)

    t1 = pa.table({"id": id_data1, struct_name: struct_array1})
    t2 = pa.table({"id": id_data2, struct_name: struct_array2})
    return t1, t2
# Helper: build expected schemas for tensor/struct columns.
def _create_tensor_schema(
    tensor_type=ArrowVariableShapedTensorType,
    dtype=pa.float32(),
    ndim=2,
    include_id=True,
    struct_name="struct",
    include_extra=False,
):
    """Build an expected schema with an optional id column and either a bare
    tensor column or a struct wrapping tensor/value (+ optional extra)."""
    fields = [("id", pa.int64())] if include_id else []
    if struct_name == "struct":
        struct_fields = [
            ("tensor", tensor_type(dtype, ndim)),
            ("value", pa.int64()),
        ]
        if include_extra:
            struct_fields.append(("extra", pa.string()))
        fields.append((struct_name, pa.struct(struct_fields)))
    else:
        # Non-struct callers get a flat tensor column instead.
        fields.append(("tensor", tensor_type(dtype, ndim)))
    return pa.schema(fields)
# Helper: build a pair of tables around plain struct arrays.
def _create_basic_struct_blocks(
    struct_data1,
    struct_data2,
    column_name="struct",
    id_data1=None,
    id_data2=None,
    other_columns=None,
):
    """Build two tables from plain struct data, with opt-in id/extra columns."""
    table1_cols = {column_name: pa.array(struct_data1)}
    table2_cols = {column_name: pa.array(struct_data2)}
    # Unlike the tensor-block helpers, id columns are only added when given.
    if id_data1 is not None:
        table1_cols["id"] = id_data1
    if id_data2 is not None:
        table2_cols["id"] = id_data2
    if other_columns:
        table1_cols.update(other_columns.get("t1", {}))
        table2_cols.update(other_columns.get("t2", {}))
    return pa.table(table1_cols), pa.table(table2_cols)
# Helper: build expected schemas containing a "struct" column.
def _create_struct_schema(struct_fields, include_id=True, other_fields=None):
    """Build a schema: optional id column, a struct column, then extra fields."""
    fields = [("id", pa.int64())] if include_id else []
    fields.append(("struct", pa.struct(struct_fields)))
    if other_fields:
        fields.extend(other_fields)
    return pa.schema(fields)
# Helper: build a pair of tables with explicitly typed struct columns.
def _create_struct_blocks_with_columns(
    struct_data1,
    struct_data2,
    struct_type1,
    struct_type2,
    additional_columns1=None,
    additional_columns2=None,
    struct_column="d",
):
    """Build two tables, keeping any additional columns ahead of the struct
    column so the resulting column order matches the expected schemas."""
    table1_cols = dict(additional_columns1) if additional_columns1 else {}
    table2_cols = dict(additional_columns2) if additional_columns2 else {}
    # Struct column is inserted last on purpose (see docstring).
    table1_cols[struct_column] = pa.array(struct_data1, type=struct_type1)
    table2_cols[struct_column] = pa.array(struct_data2, type=struct_type2)
    return pa.table(table1_cols), pa.table(table2_cols)
# Helper function for creating expected results for struct tests
def _create_struct_expected_result(schema, length, content):
"""Helper function to create expected results for struct tests."""
return {
"length": length,
"schema": schema,
"content": content,
}
# Helper: build struct blocks whose two tables share one struct type.
def _create_simple_struct_blocks(
    struct_data1,
    struct_data2,
    field_names,
    field_types,
    additional_columns1=None,
    additional_columns2=None,
    struct_column="d",
):
    """Build two struct-column tables sharing a struct type derived from
    parallel field name/type lists."""
    shared_type = pa.struct(list(zip(field_names, field_types)))
    return _create_struct_blocks_with_columns(
        struct_data1,
        struct_data2,
        shared_type,
        shared_type,
        additional_columns1,
        additional_columns2,
        struct_column,
    )
# Helper: build schemas whose struct column "d" comes from parallel lists.
def _create_simple_struct_schema(field_names, field_types, additional_fields=None):
    """Build a schema: optional leading fields, then struct column "d"."""
    fields = list(additional_fields) if additional_fields else []
    fields.append(("d", pa.struct(list(zip(field_names, field_types)))))
    return pa.schema(fields)
@pytest.fixture
def unify_schemas_edge_cases_data():
    """Edge-case unify_schemas inputs: empty, singleton, disjoint, all-null."""
    return {
        "empty_schemas": [],
        "single_schema": pa.schema([("col", pa.int32())]),
        # Disjoint column sets should simply be unioned.
        "no_common_columns": {
            "schema1": pa.schema([("col1", pa.int32())]),
            "schema2": pa.schema([("col2", pa.string())]),
            "expected": pa.schema(
                [("col1", pa.int32()), ("col2", pa.string())]
            ),
        },
        # Both sides null-typed — no promotion target exists.
        "all_null_schemas": {
            "schema1": pa.schema([("col", pa.null())]),
            "schema2": pa.schema([("col", pa.null())]),
        },
    }
@pytest.fixture
def unify_schemas_mixed_tensor_data():
    """Schemas mixing fixed- and variable-shaped tensors on one column."""
    schemas = {
        "fixed_shape": pa.schema(
            [("tensor", ArrowTensorType((2, 2), pa.int32()))]
        ),
        "variable_shaped": pa.schema(
            [("tensor", ArrowVariableShapedTensorType(pa.int32(), 2))]
        ),
        "different_shape": pa.schema(
            [("tensor", ArrowTensorType((3, 3), pa.int32()))]
        ),
    }
    # Mixing incompatible shapes should collapse to the variable-shaped type.
    schemas["expected_variable"] = pa.schema(
        [("tensor", ArrowVariableShapedTensorType(pa.int32(), 2))]
    )
    return schemas
@pytest.fixture
def unify_schemas_type_promotion_data():
    """Schemas exercising nullability and numeric promotion for column "A"."""
    data = {
        "non_null": pa.schema([pa.field("A", pa.int32())]),
        "nullable": pa.schema([pa.field("A", pa.int32(), nullable=True)]),
    }
    # Wider numeric variants the int32 column may be promoted to.
    data.update(
        {
            name: pa.schema([pa.field("A", dtype)])
            for name, dtype in (("int64", pa.int64()), ("float64", pa.float64()))
        }
    )
    return data
@pytest.fixture
def block_select_data():
    """A small three-column table plus expected schemas for column selection."""
    frame = pd.DataFrame(
        {"one": [10, 11, 12], "two": [11, 12, 13], "three": [14, 15, 16]}
    )
    return {
        "table": pa.Table.from_pandas(frame),
        "df": frame,
        "single_column": {
            "columns": ["two"],
            "expected_schema": pa.schema([("two", pa.int64())]),
        },
        "multiple_columns": {
            # Selection order must be preserved in the resulting schema.
            "columns": ["two", "one"],
            "expected_schema": pa.schema(
                [("two", pa.int64()), ("one", pa.int64())]
            ),
        },
    }
@pytest.fixture
def block_slice_data():
    """A 20-row table and an empty table, each with slice parameters."""
    row_count = 20
    frame = pd.DataFrame(
        {
            "one": list(range(row_count)),
            "two": ["a"] * row_count,
            # First value is NaN to exercise nullable float slicing.
            "three": [np.nan] + [1.5] * (row_count - 1),
        }
    )
    empty_frame = pd.DataFrame({"one": []})
    return {
        "normal": {
            "table": pa.Table.from_pandas(frame),
            "df": frame,
            "slice_params": {"a": 5, "b": 10},
        },
        "empty": {
            "table": pa.Table.from_pandas(empty_frame),
            "slice_params": {"a": 0, "b": 0},
        },
    }
if __name__ == "__main__":
    # Allow running this test module directly instead of via `pytest`.
    # sys.exit(...) and raising SystemExit are equivalent here.
    raise SystemExit(pytest.main(["-v", __file__]))
| UnsupportedType |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 78811,
"end": 80522
} | class ____(Operation):
def __init__(
self,
max_val,
*,
name=None,
):
super().__init__(name=name)
self.max_val = max_val
def call(self, x1, x2):
return backend.nn.psnr(
x1=x1,
x2=x2,
max_val=self.max_val,
)
def compute_output_spec(self, x1, x2):
if len(x1.shape) != len(x2.shape):
raise ValueError("Inputs must have the same rank")
return KerasTensor(shape=())
@keras_export(
[
"keras.ops.psnr",
"keras.ops.nn.psnr",
]
)
def psnr(
x1,
x2,
max_val,
):
"""Peak Signal-to-Noise Ratio (PSNR) function.
This function computes the Peak Signal-to-Noise Ratio between two signals,
`x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal.
The higher the PSNR, the closer the reconstructed signal is to the original
signal. Note that it can become negative when the signal power is
smaller that the noise power.
Args:
x1: The first input signal.
x2: The second input signal. Must have the same shape as `x1`.
max_val: The maximum possible value in the signals.
Returns:
float: The PSNR value between `x1` and `x2`.
Examples:
>>> x1 = keras.random.normal((2, 4, 4, 3))
>>> x2 = keras.random.normal((2, 4, 4, 3))
>>> max_val = 1.0
>>> keras.ops.nn.psnr(x1, x2, max_val)
-3.1697404
"""
if any_symbolic_tensors(
(
x1,
x2,
)
):
return PSNR(
max_val,
).symbolic_call(x1, x2)
return backend.nn.psnr(
x1,
x2,
max_val,
)
| PSNR |
python | getsentry__sentry | src/sentry/integrations/messaging/linkage.py | {
"start": 10490,
"end": 11391
} | class ____(IdentityLinkageView, ABC):
@property
def confirmation_template(self) -> str:
return "sentry/auth-link-identity.html"
@property
def metrics_operation_key(self) -> str:
return "link_identity_view"
def persist_identity(
self, idp: IdentityProvider | None, external_id: str, request: HttpRequest
) -> None:
if idp is None:
raise ValueError('idp is required for linking (params must include "integration_id")')
if isinstance(request.user, AnonymousUser):
raise TypeError("Cannot link identity without a logged-in user")
try:
Identity.objects.link_identity(user=request.user, idp=idp, external_id=external_id)
except IntegrityError:
event = self.capture_metric("failure.integrity_error")
logger.exception(event)
raise Http404
| LinkIdentityView |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 37087,
"end": 45065
} | class ____:
"""
Render information for the last render time of this control.
It stores mapping information between the input buffers (in case of a
:class:`~prompt_toolkit.layout.controls.BufferControl`) and the actual
render position on the output screen.
(Could be used for implementation of the Vi 'H' and 'L' key bindings as
well as implementing mouse support.)
:param ui_content: The original :class:`.UIContent` instance that contains
the whole input, without clipping. (ui_content)
:param horizontal_scroll: The horizontal scroll of the :class:`.Window` instance.
:param vertical_scroll: The vertical scroll of the :class:`.Window` instance.
:param window_width: The width of the window that displays the content,
without the margins.
:param window_height: The height of the window that displays the content.
:param configured_scroll_offsets: The scroll offsets as configured for the
:class:`Window` instance.
:param visible_line_to_row_col: Mapping that maps the row numbers on the
displayed screen (starting from zero for the first visible line) to
(row, col) tuples pointing to the row and column of the :class:`.UIContent`.
:param rowcol_to_yx: Mapping that maps (row, column) tuples representing
coordinates of the :class:`UIContent` to (y, x) absolute coordinates at
the rendered screen.
"""
def __init__(
self,
window: Window,
ui_content: UIContent,
horizontal_scroll: int,
vertical_scroll: int,
window_width: int,
window_height: int,
configured_scroll_offsets: ScrollOffsets,
visible_line_to_row_col: dict[int, tuple[int, int]],
rowcol_to_yx: dict[tuple[int, int], tuple[int, int]],
x_offset: int,
y_offset: int,
wrap_lines: bool,
) -> None:
self.window = window
self.ui_content = ui_content
self.vertical_scroll = vertical_scroll
self.window_width = window_width # Width without margins.
self.window_height = window_height
self.configured_scroll_offsets = configured_scroll_offsets
self.visible_line_to_row_col = visible_line_to_row_col
self.wrap_lines = wrap_lines
self._rowcol_to_yx = rowcol_to_yx # row/col from input to absolute y/x
# screen coordinates.
self._x_offset = x_offset
self._y_offset = y_offset
@property
def visible_line_to_input_line(self) -> dict[int, int]:
return {
visible_line: rowcol[0]
for visible_line, rowcol in self.visible_line_to_row_col.items()
}
@property
def cursor_position(self) -> Point:
"""
Return the cursor position coordinates, relative to the left/top corner
of the rendered screen.
"""
cpos = self.ui_content.cursor_position
try:
y, x = self._rowcol_to_yx[cpos.y, cpos.x]
except KeyError:
# For `DummyControl` for instance, the content can be empty, and so
# will `_rowcol_to_yx` be. Return 0/0 by default.
return Point(x=0, y=0)
else:
return Point(x=x - self._x_offset, y=y - self._y_offset)
@property
def applied_scroll_offsets(self) -> ScrollOffsets:
"""
Return a :class:`.ScrollOffsets` instance that indicates the actual
offset. This can be less than or equal to what's configured. E.g, when
the cursor is completely at the top, the top offset will be zero rather
than what's configured.
"""
if self.displayed_lines[0] == 0:
top = 0
else:
# Get row where the cursor is displayed.
y = self.input_line_to_visible_line[self.ui_content.cursor_position.y]
top = min(y, self.configured_scroll_offsets.top)
return ScrollOffsets(
top=top,
bottom=min(
self.ui_content.line_count - self.displayed_lines[-1] - 1,
self.configured_scroll_offsets.bottom,
),
# For left/right, it probably doesn't make sense to return something.
# (We would have to calculate the widths of all the lines and keep
# double width characters in mind.)
left=0,
right=0,
)
@property
def displayed_lines(self) -> list[int]:
"""
List of all the visible rows. (Line numbers of the input buffer.)
The last line may not be entirely visible.
"""
return sorted(row for row, col in self.visible_line_to_row_col.values())
@property
def input_line_to_visible_line(self) -> dict[int, int]:
"""
Return the dictionary mapping the line numbers of the input buffer to
the lines of the screen. When a line spans several rows at the screen,
the first row appears in the dictionary.
"""
result: dict[int, int] = {}
for k, v in self.visible_line_to_input_line.items():
if v in result:
result[v] = min(result[v], k)
else:
result[v] = k
return result
def first_visible_line(self, after_scroll_offset: bool = False) -> int:
"""
Return the line number (0 based) of the input document that corresponds
with the first visible line.
"""
if after_scroll_offset:
return self.displayed_lines[self.applied_scroll_offsets.top]
else:
return self.displayed_lines[0]
def last_visible_line(self, before_scroll_offset: bool = False) -> int:
"""
Like `first_visible_line`, but for the last visible line.
"""
if before_scroll_offset:
return self.displayed_lines[-1 - self.applied_scroll_offsets.bottom]
else:
return self.displayed_lines[-1]
def center_visible_line(
self, before_scroll_offset: bool = False, after_scroll_offset: bool = False
) -> int:
"""
Like `first_visible_line`, but for the center visible line.
"""
return (
self.first_visible_line(after_scroll_offset)
+ (
self.last_visible_line(before_scroll_offset)
- self.first_visible_line(after_scroll_offset)
)
// 2
)
@property
def content_height(self) -> int:
"""
The full height of the user control.
"""
return self.ui_content.line_count
@property
def full_height_visible(self) -> bool:
"""
True when the full height is visible (There is no vertical scroll.)
"""
return (
self.vertical_scroll == 0
and self.last_visible_line() == self.content_height
)
@property
def top_visible(self) -> bool:
"""
True when the top of the buffer is visible.
"""
return self.vertical_scroll == 0
@property
def bottom_visible(self) -> bool:
"""
True when the bottom of the buffer is visible.
"""
return self.last_visible_line() == self.content_height - 1
@property
def vertical_scroll_percentage(self) -> int:
"""
Vertical scroll as a percentage. (0 means: the top is visible,
100 means: the bottom is visible.)
"""
if self.bottom_visible:
return 100
else:
return 100 * self.vertical_scroll // self.content_height
def get_height_for_line(self, lineno: int) -> int:
"""
Return the height of the given line.
(The height that it would take, if this line became visible.)
"""
if self.wrap_lines:
return self.ui_content.get_height_for_line(
lineno, self.window_width, self.window.get_line_prefix
)
else:
return 1
| WindowRenderInfo |
python | pytorch__pytorch | test/jit/test_complex.py | {
"start": 479,
"end": 15531
} | class ____(JitTestCase):
def test_script(self):
def fn(a: complex):
return a
self.checkScript(fn, (3 + 5j,))
def test_complexlist(self):
def fn(a: List[complex], idx: int):
return a[idx]
input = [1j, 2, 3 + 4j, -5, -7j]
self.checkScript(fn, (input, 2))
def test_complexdict(self):
def fn(a: Dict[complex, complex], key: complex) -> complex:
return a[key]
input = {2 + 3j: 2 - 3j, -4.3 - 2j: 3j}
self.checkScript(fn, (input, -4.3 - 2j))
def test_pickle(self):
class ComplexModule(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.a = 3 + 5j
self.b = [2 + 3j, 3 + 4j, 0 - 3j, -4 + 0j]
self.c = {2 + 3j: 2 - 3j, -4.3 - 2j: 3j}
@torch.jit.script_method
def forward(self, b: int):
return b + 2j
loaded = self.getExportImportCopy(ComplexModule())
self.assertEqual(loaded.a, 3 + 5j)
self.assertEqual(loaded.b, [2 + 3j, 3 + 4j, -3j, -4])
self.assertEqual(loaded.c, {2 + 3j: 2 - 3j, -4.3 - 2j: 3j})
self.assertEqual(loaded(2), 2 + 2j)
def test_complex_parse(self):
def fn(a: int, b: torch.Tensor, dim: int):
# verifies `emitValueToTensor()` 's behavior
b[dim] = 2.4 + 0.5j
return (3 * 2j) + a + 5j - 7.4j - 4
t1 = torch.tensor(1)
t2 = torch.tensor([0.4, 1.4j, 2.35])
self.checkScript(fn, (t1, t2, 2))
def test_complex_constants_and_ops(self):
vals = (
[0.0, 1.0, 2.2, -1.0, -0.0, -2.2, 1, 0, 2]
+ [10.0**i for i in range(2)]
+ [-(10.0**i) for i in range(2)]
)
complex_vals = tuple(complex(x, y) for x, y in product(vals, vals))
funcs_template = dedent(
"""
def func(a: complex):
return cmath.{func_or_const}(a)
"""
)
def checkCmath(func_name, funcs_template=funcs_template):
funcs_str = funcs_template.format(func_or_const=func_name)
scope = {}
execWrapper(funcs_str, globals(), scope)
cu = torch.jit.CompilationUnit(funcs_str)
f_script = cu.func
f = scope["func"]
if func_name in ["isinf", "isnan", "isfinite"]:
new_vals = vals + ([float("inf"), float("nan"), -1 * float("inf")])
final_vals = tuple(
complex(x, y) for x, y in product(new_vals, new_vals)
)
else:
final_vals = complex_vals
for a in final_vals:
res_python = None
res_script = None
try:
res_python = f(a)
except Exception as e:
res_python = e
try:
res_script = f_script(a)
except Exception as e:
res_script = e
if res_python != res_script:
if isinstance(res_python, Exception):
continue
msg = f"Failed on {func_name} with input {a}. Python: {res_python}, Script: {res_script}"
self.assertEqual(res_python, res_script, msg=msg)
unary_ops = [
"log",
"log10",
"sqrt",
"exp",
"sin",
"cos",
"asin",
"acos",
"atan",
"sinh",
"cosh",
"tanh",
"asinh",
"acosh",
"atanh",
"phase",
"isinf",
"isnan",
"isfinite",
]
# --- Unary ops ---
for op in unary_ops:
checkCmath(op)
def fn(x: complex):
return abs(x)
for val in complex_vals:
self.checkScript(fn, (val,))
def pow_complex_float(x: complex, y: float):
return pow(x, y)
def pow_float_complex(x: float, y: complex):
return pow(x, y)
self.checkScript(pow_float_complex, (2, 3j))
self.checkScript(pow_complex_float, (3j, 2))
def pow_complex_complex(x: complex, y: complex):
return pow(x, y)
for x, y in zip(complex_vals, complex_vals):
# Reference: https://github.com/pytorch/pytorch/issues/54622
if x == 0:
continue
self.checkScript(pow_complex_complex, (x, y))
if not IS_MACOS:
# --- Binary op ---
def rect_fn(x: float, y: float):
return cmath.rect(x, y)
for x, y in product(vals, vals):
self.checkScript(
rect_fn,
(
x,
y,
),
)
func_constants_template = dedent(
"""
def func():
return cmath.{func_or_const}
"""
)
float_consts = ["pi", "e", "tau", "inf", "nan"]
complex_consts = ["infj", "nanj"]
for x in float_consts + complex_consts:
checkCmath(x, funcs_template=func_constants_template)
def test_infj_nanj_pickle(self):
class ComplexModule(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.a = 3 + 5j
@torch.jit.script_method
def forward(self, infj: int, nanj: int):
if infj == 2:
return infj + cmath.infj
else:
return nanj + cmath.nanj
loaded = self.getExportImportCopy(ComplexModule())
self.assertEqual(loaded(2, 3), 2 + cmath.infj)
self.assertEqual(loaded(3, 4), 4 + cmath.nanj)
def test_complex_constructor(self):
# Test all scalar types
def fn_int(real: int, img: int):
return complex(real, img)
self.checkScript(
fn_int,
(
0,
0,
),
)
self.checkScript(
fn_int,
(
-1234,
0,
),
)
self.checkScript(
fn_int,
(
0,
-1256,
),
)
self.checkScript(
fn_int,
(
-167,
-1256,
),
)
def fn_float(real: float, img: float):
return complex(real, img)
self.checkScript(
fn_float,
(
0.0,
0.0,
),
)
self.checkScript(
fn_float,
(
-1234.78,
0,
),
)
self.checkScript(
fn_float,
(
0,
56.18,
),
)
self.checkScript(
fn_float,
(
-1.9,
-19.8,
),
)
def fn_bool(real: bool, img: bool):
return complex(real, img)
self.checkScript(
fn_bool,
(
True,
True,
),
)
self.checkScript(
fn_bool,
(
False,
False,
),
)
self.checkScript(
fn_bool,
(
False,
True,
),
)
self.checkScript(
fn_bool,
(
True,
False,
),
)
def fn_bool_int(real: bool, img: int):
return complex(real, img)
self.checkScript(
fn_bool_int,
(
True,
0,
),
)
self.checkScript(
fn_bool_int,
(
False,
0,
),
)
self.checkScript(
fn_bool_int,
(
False,
-1,
),
)
self.checkScript(
fn_bool_int,
(
True,
3,
),
)
def fn_int_bool(real: int, img: bool):
return complex(real, img)
self.checkScript(
fn_int_bool,
(
0,
True,
),
)
self.checkScript(
fn_int_bool,
(
0,
False,
),
)
self.checkScript(
fn_int_bool,
(
-3,
True,
),
)
self.checkScript(
fn_int_bool,
(
6,
False,
),
)
def fn_bool_float(real: bool, img: float):
return complex(real, img)
self.checkScript(
fn_bool_float,
(
True,
0.0,
),
)
self.checkScript(
fn_bool_float,
(
False,
0.0,
),
)
self.checkScript(
fn_bool_float,
(
False,
-1.0,
),
)
self.checkScript(
fn_bool_float,
(
True,
3.0,
),
)
def fn_float_bool(real: float, img: bool):
return complex(real, img)
self.checkScript(
fn_float_bool,
(
0.0,
True,
),
)
self.checkScript(
fn_float_bool,
(
0.0,
False,
),
)
self.checkScript(
fn_float_bool,
(
-3.0,
True,
),
)
self.checkScript(
fn_float_bool,
(
6.0,
False,
),
)
def fn_float_int(real: float, img: int):
return complex(real, img)
self.checkScript(
fn_float_int,
(
0.0,
1,
),
)
self.checkScript(
fn_float_int,
(
0.0,
-1,
),
)
self.checkScript(
fn_float_int,
(
1.8,
-3,
),
)
self.checkScript(
fn_float_int,
(
2.7,
8,
),
)
def fn_int_float(real: int, img: float):
return complex(real, img)
self.checkScript(
fn_int_float,
(
1,
0.0,
),
)
self.checkScript(
fn_int_float,
(
-1,
1.7,
),
)
self.checkScript(
fn_int_float,
(
-3,
0.0,
),
)
self.checkScript(
fn_int_float,
(
2,
-8.9,
),
)
def test_torch_complex_constructor_with_tensor(self):
tensors = [torch.rand(1), torch.randint(-5, 5, (1,)), torch.tensor([False])]
def fn_tensor_float(real, img: float):
return complex(real, img)
def fn_tensor_int(real, img: int):
return complex(real, img)
def fn_tensor_bool(real, img: bool):
return complex(real, img)
def fn_float_tensor(real: float, img):
return complex(real, img)
def fn_int_tensor(real: int, img):
return complex(real, img)
def fn_bool_tensor(real: bool, img):
return complex(real, img)
for tensor in tensors:
self.checkScript(fn_tensor_float, (tensor, 1.2))
self.checkScript(fn_tensor_int, (tensor, 3))
self.checkScript(fn_tensor_bool, (tensor, True))
self.checkScript(fn_float_tensor, (1.2, tensor))
self.checkScript(fn_int_tensor, (3, tensor))
self.checkScript(fn_bool_tensor, (True, tensor))
def fn_tensor_tensor(real, img):
return complex(real, img) + complex(2)
for x, y in product(tensors, tensors):
self.checkScript(
fn_tensor_tensor,
(
x,
y,
),
)
def test_comparison_ops(self):
def fn1(a: complex, b: complex):
return a == b
def fn2(a: complex, b: complex):
return a != b
def fn3(a: complex, b: float):
return a == b
def fn4(a: complex, b: float):
return a != b
x, y = 2 - 3j, 4j
self.checkScript(fn1, (x, x))
self.checkScript(fn1, (x, y))
self.checkScript(fn2, (x, x))
self.checkScript(fn2, (x, y))
x1, y1 = 1 + 0j, 1.0
self.checkScript(fn3, (x1, y1))
self.checkScript(fn4, (x1, y1))
def test_div(self):
def fn1(a: complex, b: complex):
return a / b
x, y = 2 - 3j, 4j
self.checkScript(fn1, (x, y))
def test_complex_list_sum(self):
def fn(x: List[complex]):
return sum(x)
self.checkScript(fn, (torch.randn(4, dtype=torch.cdouble).tolist(),))
def test_tensor_attributes(self):
def tensor_real(x):
return x.real
def tensor_imag(x):
return x.imag
t = torch.randn(2, 3, dtype=torch.cdouble)
self.checkScript(tensor_real, (t,))
self.checkScript(tensor_imag, (t,))
def test_binary_op_complex_tensor(self):
def mul(x: complex, y: torch.Tensor):
return x * y
def add(x: complex, y: torch.Tensor):
return x + y
def eq(x: complex, y: torch.Tensor):
return x == y
def ne(x: complex, y: torch.Tensor):
return x != y
def sub(x: complex, y: torch.Tensor):
return x - y
def div(x: complex, y: torch.Tensor):
return x - y
ops = [mul, add, eq, ne, sub, div]
for shape in [(1,), (2, 2)]:
x = 0.71 + 0.71j
y = torch.randn(shape, dtype=torch.cfloat)
for op in ops:
eager_result = op(x, y)
scripted = torch.jit.script(op)
jit_result = scripted(x, y)
self.assertEqual(eager_result, jit_result)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestComplex |
python | huggingface__transformers | src/transformers/models/glpn/configuration_glpn.py | {
"start": 791,
"end": 5998
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate an GLPN
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GLPN
[vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 64):
The dimension of the decoder.
max_depth (`int`, *optional*, defaults to 10):
The maximum depth of the decoder.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the head.
Example:
```python
>>> from transformers import GLPNModel, GLPNConfig
>>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
>>> configuration = GLPNConfig()
>>> # Initializing a model from the vinvino02/glpn-kitti style configuration
>>> model = GLPNModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glpn"
def __init__(
self,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[32, 64, 160, 256],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
num_attention_heads=[1, 2, 5, 8],
mlp_ratios=[4, 4, 4, 4],
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
drop_path_rate=0.1,
layer_norm_eps=1e-6,
decoder_hidden_size=64,
max_depth=10,
head_in_index=-1,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.max_depth = max_depth
self.head_in_index = head_in_index
__all__ = ["GLPNConfig"]
| GLPNConfig |
python | pytorch__pytorch | torch/nn/modules/conv.py | {
"start": 39414,
"end": 48291
} | class ____(_ConvTransposeNd):
__doc__ = (
r"""Applies a 2D transposed convolution operator over an input image
composed of several input planes.
This module can be seen as the gradient of Conv2d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation. When stride > 1, ConvTranspose2d inserts zeros between input
elements along the spatial dimensions before applying the convolution kernel. This zero-insertion operation is the standard
behavior of transposed convolutions, which can increase the spatial resolution and is equivalent to a learnable
upsampling operation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimensions
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sizes of the input. This is set so that
when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(**reproducibility_notes, **convolution_notes)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = torch.randn(20, 16, 50, 100)
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12, 12)
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
output_padding: _size_2_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_2_t = 1,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
True,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor:
"""
Performs the forward pass.
Attributes:
input (Tensor): The input tensor.
output_size (list[int], optional): A list of integers representing
the size of the output tensor. Default is None.
"""
if self.padding_mode != "zeros":
raise ValueError(
"Only `zeros` padding mode is supported for ConvTranspose2d"
)
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 2
output_padding = self._output_padding(
input,
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
num_spatial_dims,
self.dilation, # type: ignore[arg-type]
)
return F.conv_transpose2d(
input,
self.weight,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
| ConvTranspose2d |
python | walkccc__LeetCode | solutions/215. Kth Largest Element in an Array/215-3.py | {
"start": 0,
"end": 777
} | class ____:
def findKthLargest(self, nums: list[int], k: int) -> int:
def quickSelect(l: int, r: int, k: int) -> int:
randIndex = random.randint(0, r - l) + l
nums[randIndex], nums[r] = nums[r], nums[randIndex]
pivot = nums[r]
nextSwapped = l
for i in range(l, r):
if nums[i] >= pivot:
nums[nextSwapped], nums[i] = nums[i], nums[nextSwapped]
nextSwapped += 1
nums[nextSwapped], nums[r] = nums[r], nums[nextSwapped]
count = nextSwapped - l + 1 # Number of nums >= pivot
if count == k:
return nums[nextSwapped]
if count > k:
return quickSelect(l, nextSwapped - 1, k)
return quickSelect(nextSwapped + 1, r, k - count)
return quickSelect(0, len(nums) - 1, k)
| Solution |
python | django__django | tests/admin_views/admin.py | {
"start": 13202,
"end": 13270
} | class ____(admin.StackedInline):
model = DooHickey
| DooHickeyInline |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 109586,
"end": 115485
} | class ____(BigBirdPegasusPreTrainedModel):
def __init__(self, config: BigBirdPegasusConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = BigBirdPegasusModel(config)
self.classification_head = BigBirdPegasusClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the `input_ids` to the right, following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read
[`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in
[the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring
| BigBirdPegasusForSequenceClassification |
python | tornadoweb__tornado | demos/facebook/facebook.py | {
"start": 1998,
"end": 2441
} | class ____(BaseHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
async def get(self):
stream = await self.facebook_request(
"/me/home", self._on_stream, access_token=self.current_user["access_token"]
)
if stream is None:
# Session may have expired
self.redirect("/auth/login")
return
self.render("stream.html", stream=stream)
| MainHandler |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensors.py | {
"start": 7812,
"end": 7996
} | class ____(graphene.Union):
class Meta:
types = (GrapheneSensors, GrapheneRepositoryNotFoundError, GraphenePythonError)
name = "SensorsOrError"
| GrapheneSensorsOrError |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 13131,
"end": 15052
} | class ____(NonStrictDataModel):
"""
:param active: Stats for active tasks
:type active: StatsStatusCount
:param archived: Stats for archived tasks
:type archived: StatsStatusCount
"""
_schema = {
"properties": {
"active": {
"description": "Stats for active tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
"archived": {
"description": "Stats for archived tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
},
"type": "object",
}
def __init__(self, active: Any = None, archived: Any = None, **kwargs: Any) -> None:
super(Stats, self).__init__(**kwargs)
self.active = active
self.archived = archived
@schema_property("active")
def active(self) -> Any:
return self._property_active
@active.setter
def active(self, value: Any) -> None:
if value is None:
self._property_active = None
return
if isinstance(value, dict):
value = StatsStatusCount.from_dict(value)
else:
self.assert_isinstance(value, "active", StatsStatusCount)
self._property_active = value
@schema_property("archived")
def archived(self) -> Any:
return self._property_archived
@archived.setter
def archived(self, value: Any) -> None:
if value is None:
self._property_archived = None
return
if isinstance(value, dict):
value = StatsStatusCount.from_dict(value)
else:
self.assert_isinstance(value, "archived", StatsStatusCount)
self._property_archived = value
| Stats |
python | getsentry__sentry | src/sentry/integrations/messaging/spec.py | {
"start": 9495,
"end": 9966
} | class ____(ActionHandlerFactory):
def __init__(self, spec: MessagingIntegrationSpec) -> None:
super().__init__(
slug=spec.provider_slug,
service_type=spec.action_service,
supported_target_types=[ActionTarget.SPECIFIC],
integration_provider=spec.provider_slug,
)
self.spec = spec
def build_handler(self) -> ActionHandler:
return MessagingActionHandler(self.spec)
| _MessagingHandlerFactory |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 15654,
"end": 16539
} | class ____(TestCase):
"""
Tests specific to caching responses
"""
def test_head_caching(self):
"""
Test caching of HEAD requests
"""
response = self.client.head('/cache')
cache.set('key', response)
cached_response = cache.get('key')
assert isinstance(cached_response, Response)
assert cached_response.content == response.content
assert cached_response.status_code == response.status_code
def test_get_caching(self):
"""
Test caching of GET requests
"""
response = self.client.get('/cache')
cache.set('key', response)
cached_response = cache.get('key')
assert isinstance(cached_response, Response)
assert cached_response.content == response.content
assert cached_response.status_code == response.status_code
| CacheRenderTest |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler.py | {
"start": 79927,
"end": 83247
} | class ____(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4)
register_mock_trainable()
def tearDown(self):
ray.shutdown()
def basicSetup(
self,
resample_prob=0.0,
explore=None,
perturbation_interval=10,
log_config=False,
hyperparams=None,
hyperparam_mutations=None,
step_once=True,
):
hyperparam_mutations = hyperparam_mutations or {
"float_factor": lambda: 100.0,
"int_factor": lambda: 10,
"id_factor": [100],
}
pbt = PopulationBasedTraining(
metric="mean_accuracy",
mode="max",
time_attr="training_iteration",
perturbation_interval=perturbation_interval,
resample_probability=resample_prob,
quantile_fraction=0.25,
hyperparam_mutations=hyperparam_mutations,
custom_explore_fn=explore,
log_config=log_config,
)
return pbt
def testCheckpointing(self):
pbt = self.basicSetup(perturbation_interval=10)
class train(tune.Trainable):
def step(self):
return {"mean_accuracy": self.training_iteration}
def save_checkpoint(self, path):
checkpoint = os.path.join(path, "checkpoint")
with open(checkpoint, "w") as f:
f.write("OK")
def reset_config(self, config):
return True
def load_checkpoint(self, checkpoint):
pass
trial_hyperparams = {
"float_factor": 2.0,
"const_factor": 3,
"int_factor": 10,
"id_factor": 0,
}
analysis = tune.run(
train,
num_samples=3,
scheduler=pbt,
checkpoint_config=CheckpointConfig(checkpoint_frequency=3),
config=trial_hyperparams,
stop={"training_iteration": 30},
)
for trial in analysis.trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testCheckpointDict(self):
pbt = self.basicSetup(perturbation_interval=10)
class train_dict(tune.Trainable):
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"mean_accuracy": self.training_iteration}
def save_checkpoint(self, path):
return self.state
def load_checkpoint(self, state):
self.state = state
def reset_config(self, config):
return True
trial_hyperparams = {
"float_factor": 2.0,
"const_factor": 3,
"int_factor": 10,
"id_factor": 0,
}
analysis = tune.run(
train_dict,
num_samples=3,
scheduler=pbt,
checkpoint_config=CheckpointConfig(checkpoint_frequency=3),
config=trial_hyperparams,
stop={"training_iteration": 30},
)
for trial in analysis.trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
| E2EPopulationBasedTestingSuite |
python | sqlalchemy__sqlalchemy | test/engine/test_ddlevents.py | {
"start": 27845,
"end": 30392
} | class ____(fixtures.TestBase):
"""test DDL transactional behavior as of SQLAlchemy 1.4."""
@testing.fixture
def metadata_fixture(self):
m = MetaData()
Table("t1", m, Column("q", Integer))
Table("t2", m, Column("q", Integer))
try:
yield m
finally:
m.drop_all(testing.db)
@testing.fixture
def listening_engine_fixture(self):
eng = engines.testing_engine()
m1 = mock.Mock()
event.listen(eng, "begin", m1.begin)
event.listen(eng, "commit", m1.commit)
event.listen(eng, "rollback", m1.rollback)
@event.listens_for(eng, "before_cursor_execute")
def before_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if "CREATE TABLE" in statement:
m1.cursor_execute("CREATE TABLE ...")
eng.connect().close()
return eng, m1
def test_ddl_engine(self, metadata_fixture, listening_engine_fixture):
eng, m1 = listening_engine_fixture
metadata_fixture.create_all(eng)
eq_(
m1.mock_calls,
[
mock.call.begin(mock.ANY),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.commit(mock.ANY),
],
)
def test_ddl_connection_autobegin_transaction(
self, metadata_fixture, listening_engine_fixture
):
eng, m1 = listening_engine_fixture
with eng.connect() as conn:
metadata_fixture.create_all(conn)
conn.commit()
eq_(
m1.mock_calls,
[
mock.call.begin(mock.ANY),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.commit(mock.ANY),
],
)
def test_ddl_connection_explicit_begin_transaction(
self, metadata_fixture, listening_engine_fixture
):
eng, m1 = listening_engine_fixture
with eng.connect() as conn:
with conn.begin():
metadata_fixture.create_all(conn)
eq_(
m1.mock_calls,
[
mock.call.begin(mock.ANY),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.cursor_execute("CREATE TABLE ..."),
mock.call.commit(mock.ANY),
],
)
| DDLTransactionTest |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_registries.py | {
"start": 273,
"end": 365
} | class ____(GQLResult):
organization: Optional[FetchRegistriesOrganization]
| FetchRegistries |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels44.py | {
"start": 315,
"end": 1752
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels44.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56179712, 56185600]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [
{
"value": 33,
"border": {"color": "red"},
"fill": {"color": "#00B050"},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | euske__pdfminer | pdfminer/cmapdb.py | {
"start": 4342,
"end": 4588
} | class ____(CMap):
def __init__(self, name, module):
CMap.__init__(self, CMapName=name)
self.code2cid = module.CODE2CID
if module.IS_VERTICAL:
self.attrs['WMode'] = 1
return
## PyUnicodeMap
##
| PyCMap |
python | getsentry__sentry | src/sentry/integrations/discord/client.py | {
"start": 1448,
"end": 8855
} | class ____(ApiClient):
integration_name: str = IntegrationProviderSlug.DISCORD.value
base_url: str = DISCORD_BASE_URL
_METRICS_FAILURE_KEY: str = "sentry.integrations.discord.failure"
_METRICS_SUCCESS_KEY: str = "sentry.integrations.discord.success"
_METRICS_USER_ERROR_KEY: str = "sentry.integrations.discord.failure.user_error"
_METRICS_RATE_LIMIT_KEY: str = "sentry.integrations.discord.failure.rate_limit"
def __init__(self):
super().__init__()
self.application_id = options.get("discord.application-id")
self.client_secret = options.get("discord.client-secret")
self.bot_token = options.get("discord.bot-token")
def prepare_auth_header(self) -> dict[str, str]:
return {"Authorization": f"Bot {self.bot_token}"}
def set_application_command(self, command: object) -> None:
self.post(
APPLICATION_COMMANDS_URL.format(application_id=self.application_id),
headers=self.prepare_auth_header(),
data=command,
)
def has_application_commands(self) -> bool:
response = self.get(
APPLICATION_COMMANDS_URL.format(application_id=self.application_id),
headers=self.prepare_auth_header(),
)
return bool(response)
def get_guild_name(self, guild_id: str) -> str:
response = self.get(GUILD_URL.format(guild_id=guild_id), headers=self.prepare_auth_header())
return response["name"]
def get_access_token(self, code: str, url: str):
data = {
"client_id": self.application_id,
"client_secret": self.client_secret,
"grant_type": "authorization_code",
"code": code,
"redirect_uri": url,
"scope": "identify",
}
headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
response = self.post(TOKEN_URL, json=False, data=urlencode(data), headers=headers)
return response["access_token"]
def get_user_id(self, access_token: str):
headers = {"Authorization": f"Bearer {access_token}"}
response = self.get(
USER_URL,
headers=headers,
)
return response["id"]
def check_user_bot_installation_permission(self, access_token: str, guild_id: str) -> bool:
headers = {"Authorization": f"Bearer {access_token}"}
# We only want information about guild_id and check the user's permission in the guild, but we can't currently do that
# https://github.com/discord/discord-api-docs/discussions/6846
# TODO(ecosystem): Eventually, we should use `/users/@me/guilds/{guild.id}/member`
# Instead, we check if the user in a member of the guild
try:
self.get(f"/users/@me/guilds/{guild_id}/member", headers=headers)
except ApiError as e:
if e.code == 404:
return False
return True
def leave_guild(self, guild_id: str) -> None:
"""
Leave the given guild_id, if the bot is currently a member.
"""
self.delete(USERS_GUILD_URL.format(guild_id=guild_id), headers=self.prepare_auth_header())
def get_channel(self, channel_id: str) -> object | None:
"""
Get a channel by id.
"""
return self.get(
CHANNEL_URL.format(channel_id=channel_id), headers=self.prepare_auth_header()
)
def track_response_data(
self,
code: str | int,
error: Exception | None = None,
resp: Response | None = None,
extra: Mapping[str, str] | None = None,
) -> None:
"""
Handle response from Discord by logging and capturing metrics
"""
log_params = {
"code": code,
"error": str(error),
"extra": extra,
}
if self.integration_type:
log_params[str(self.integration_type)] = self.name
try:
logging_context = getattr(self, "logging_context", None)
log_params["logging_context"] = logging_context
except Exception:
pass
is_ok = code in {
status.HTTP_200_OK,
status.HTTP_201_CREATED,
status.HTTP_202_ACCEPTED,
status.HTTP_204_NO_CONTENT,
}
if not is_ok or error:
code_to_use = code if isinstance(code, int) else None
self._handle_failure(code=code_to_use, log_params=log_params, resp=resp)
else:
self._handle_success(log_params=log_params)
def _handle_failure(
self,
code: int | None,
log_params: dict[str, Any],
resp: Response | None = None,
) -> None:
"""
Do extra logic to handle an error from Discord
"""
discord_error_response: dict | None = None
if resp is not None:
# Try to get the additional error code that Discord sent us to help determine what specific error happened
try:
discord_error_response = orjson.loads(resp.content)
log_params["discord_error_response"] = discord_error_response
except Exception as err:
self.logger.info(
"error trying to handle discord error message", exc_info=err, extra=log_params
)
discord_error_code = None
if discord_error_response is not None:
# Discord sends us a special code for errors in the response data
# https://discord.com/developers/docs/topics/opcodes-and-status-codes#json
discord_error_code = str(discord_error_response.get("code", ""))
log_params["discord_error_code"] = discord_error_code
# Get the specific meaning for those codes
if discord_error_code_message := DISCORD_ERROR_CODES.get(discord_error_code, None):
log_params["discord_error_code_message"] = discord_error_code_message
# Check if the error is due to a user configuration error, which we do not have control over to fix
# An example of this would be if the user deleted the discord guild and never updated the alert action
is_user_error = discord_error_code in DISCORD_USER_ERRORS
log_params["is_user_error"] = is_user_error
if is_user_error:
metrics_key = self._METRICS_USER_ERROR_KEY
else:
metrics_key = (
self._METRICS_RATE_LIMIT_KEY
if code is not None and code == status.HTTP_429_TOO_MANY_REQUESTS
else self._METRICS_FAILURE_KEY
)
metrics.incr(
metrics_key,
sample_rate=1.0,
)
self.logger.info("handled discord error", extra=log_params)
def _handle_success(
self,
log_params: dict[str, Any],
) -> None:
metrics.incr(
self._METRICS_SUCCESS_KEY,
sample_rate=1.0,
)
self.logger.info("handled discord success", extra=log_params)
def send_message(self, channel_id: str, message: DiscordMessage) -> None:
"""
Send a message to the specified channel.
"""
self.post(
MESSAGE_URL.format(channel_id=channel_id),
data=message,
timeout=5,
headers=self.prepare_auth_header(),
)
| DiscordClient |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 24045,
"end": 25866
} | class ____(DocumentPatchedEvent):
''' A concrete event representing a change to add a new Model to a
Document's collection of "root" models.
'''
kind = "RootAdded"
def __init__(self, document: Document, model: Model, setter: Setter | None = None, callback_invoker: Invoker | None = None) -> None:
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to add as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.model = model
def to_serializable(self, serializer: Serializer) -> RootAdded:
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootAdded'
'title' : <reference to a Model>
}
Args:
serializer (Serializer):
'''
return RootAdded(
kind = self.kind,
model = serializer.encode(self.model),
)
@staticmethod
def _handle_event(doc: Document, event: RootAddedEvent) -> None:
model = event.model
doc.add_root(model, event.setter)
| RootAddedEvent |
python | dask__distributed | distributed/spill.py | {
"start": 2419,
"end": 10266
} | class ____(zict.Buffer[Key, object]):
"""MutableMapping that automatically spills out dask key/value pairs to disk when
the total size of the stored data exceeds the target. If max_spill is provided the
key/value pairs won't be spilled once this threshold has been reached.
Parameters
----------
spill_directory: str
Location on disk to write the spill files to
target: int
Managed memory, in bytes, to start spilling at
max_spill: int | False, optional
Limit of number of bytes to be spilled on disk. Set to False to disable.
"""
logged_pickle_errors: set[Key]
#: (label, unit) -> ever-increasing cumulative value
cumulative_metrics: defaultdict[tuple[str, str], float]
def __init__(
self,
spill_directory: str,
target: int,
max_spill: int | Literal[False] = False,
):
# If a value is still in use somewhere on the worker since the last time it was
# unspilled, don't duplicate it
slow = Slow(spill_directory, max_spill)
slow_cached = zict.Cache(slow, zict.WeakValueMapping())
super().__init__(fast={}, slow=slow_cached, n=target, weight=_in_memory_weight)
self.logged_pickle_errors = set() # keys logged with pickle error
self.cumulative_metrics = defaultdict(float)
@contextmanager
def _capture_metrics(self) -> Iterator[None]:
"""Capture metrics re. disk read/write, serialize/deserialize, and
compress/decompress.
Note that this duplicates capturing from gather_dep, get_data, and execute. It
is repeated here to make it possible to split serialize/deserialize and
compress/decompress triggered by spill/unspill from those triggered by network
comms.
"""
def metrics_callback(label: Hashable, value: float, unit: str) -> None:
assert isinstance(label, str)
self.cumulative_metrics[label, unit] += value
with context_meter.add_callback(metrics_callback):
yield
@contextmanager
def _handle_errors(self, key: Key | None) -> Iterator[None]:
try:
yield
except MaxSpillExceeded as e:
# key is in self.fast; no keys have been lost on eviction
(key_e,) = e.args
assert key_e in self.fast
assert key_e not in self.slow
logger.warning(
"Spill file on disk reached capacity; keeping data in memory"
)
raise HandledError()
except OSError:
# Typically, this is a disk full error
logger.error("Spill to disk failed; keeping data in memory", exc_info=True)
raise HandledError()
except PickleError as e:
assert e.key in self.fast
assert e.key not in self.slow
if e.key == key:
assert key is not None
# The key we just inserted failed to serialize.
# This happens only when the key is individually larger than target.
# The exception will be caught by Worker and logged; the status of
# the task will be set to error.
del self[key]
raise
else:
# The key we just inserted is smaller than target, but it caused
# another, unrelated key to be spilled out of the LRU, and that key
# failed to serialize. There's nothing wrong with the new key. The older
# key is still in memory.
if e.key not in self.logged_pickle_errors:
logger.error("Failed to pickle %r", e.key, exc_info=True)
self.logged_pickle_errors.add(e.key)
raise HandledError()
def __setitem__(self, key: Key, value: object) -> None:
"""If sizeof(value) < target, write key/value pair to self.fast; this may in
turn cause older keys to be spilled from fast to slow.
If sizeof(value) >= target, write key/value pair directly to self.slow instead.
Raises
------
Exception
sizeof(value) >= target, and value failed to pickle.
The key/value pair has been forgotten.
In all other cases:
- an older value was evicted and failed to pickle,
- this value or an older one caused the disk to fill and raise OSError,
- this value or an older one caused the max_spill threshold to be exceeded,
this method does not raise and guarantees that the key/value that caused the
issue remained in fast.
"""
try:
with self._capture_metrics(), self._handle_errors(key):
super().__setitem__(key, value)
self.logged_pickle_errors.discard(key)
except HandledError:
assert key in self.fast
assert key not in self.slow
def evict(self) -> int:
"""Implementation of :meth:`ManualEvictProto.evict`.
Manually evict the oldest key/value pair, even if target has not been
reached. Returns sizeof(value).
If the eviction failed (value failed to pickle, disk full, or max_spill
exceeded), return -1; the key/value pair that caused the issue will remain in
fast. The exception has been logged internally.
This method never raises.
"""
try:
with self._capture_metrics(), self._handle_errors(None):
_, _, weight = self.fast.evict()
return cast(int, weight)
except HandledError:
return -1
def __getitem__(self, key: Key) -> object:
with self._capture_metrics():
if key in self.fast:
# Note: don't log from self.fast.__getitem__, because that's called
# every time a key is evicted, and we don't want to count those events
# here.
memory_size = cast(int, self.fast.weights[key])
# This is logged not only by the internal metrics callback but also by
# those installed by gather_dep, get_data, and execute
context_meter.digest_metric("memory-read", 1, "count")
context_meter.digest_metric("memory-read", memory_size, "bytes")
return super().__getitem__(key)
def __delitem__(self, key: Key) -> None:
super().__delitem__(key)
self.logged_pickle_errors.discard(key)
def pop(self, key: Key, default: object = None) -> object:
raise NotImplementedError(
"Are you calling .pop(key, None) as a way to discard a key if it exists?"
"It may cause data to be read back from disk! Please use `del` instead."
)
@property
def memory(self) -> Mapping[Key, object]:
"""Key/value pairs stored in RAM. Alias of zict.Buffer.fast.
For inspection only - do not modify directly!
"""
return self.fast
@property
def disk(self) -> Mapping[Key, object]:
"""Key/value pairs spilled out to disk. Alias of zict.Buffer.slow.
For inspection only - do not modify directly!
"""
return self.slow
@property
def _slow_uncached(self) -> Slow:
cache = cast(zict.Cache, self.slow)
return cast(Slow, cache.data)
@property
def spilled_total(self) -> SpilledSize:
"""Number of bytes spilled to disk. Tuple of
- output of sizeof()
- pickled size
The two may differ substantially, e.g. if sizeof() is inaccurate or in case of
compression.
"""
return self._slow_uncached.total_weight
def _in_memory_weight(key: Key, value: object) -> int:
return safe_sizeof(value)
# Internal exceptions. These are never raised by SpillBuffer.
| SpillBuffer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass7.py | {
"start": 1080,
"end": 1195
} | class ____(type):
def __call__(cls, *args, **kwargs):
return super().__call__(*args, **kwargs)
| MetaClass4 |
python | davidhalter__parso | parso/python/tree.py | {
"start": 22146,
"end": 22214
} | class ____(Flow):
type = 'while_stmt'
__slots__ = ()
| WhileStmt |
python | neetcode-gh__leetcode | python/0141-linked-list-cycle.py | {
"start": 136,
"end": 408
} | class ____:
def hasCycle(self, head: ListNode) -> bool:
slow, fast = head, head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return True
return False
| Solution |
python | getsentry__sentry | src/sentry/models/projectteam.py | {
"start": 541,
"end": 936
} | class ____(BaseManager["ProjectTeam"]):
def get_for_teams_with_org_cache(self, teams: Sequence["Team"]) -> QuerySet["ProjectTeam"]:
return (
self.filter(team__in=teams, project__status=ObjectStatus.ACTIVE)
.order_by("project__name", "project__slug")
.select_related("project", "project__organization")
)
@region_silo_model
| ProjectTeamManager |
python | huggingface__transformers | src/transformers/image_utils.py | {
"start": 37076,
"end": 37545
} | class ____:
"""
Hashable dictionary to store image size information.
"""
height: Optional[int] = None
width: Optional[int] = None
longest_edge: Optional[int] = None
shortest_edge: Optional[int] = None
max_height: Optional[int] = None
max_width: Optional[int] = None
def __getitem__(self, key):
if hasattr(self, key):
return getattr(self, key)
raise KeyError(f"Key {key} not found in SizeDict.")
| SizeDict |
python | networkx__networkx | networkx/algorithms/tests/test_cycles.py | {
"start": 30906,
"end": 33821
} | class ____:
@classmethod
def setup_class(cls):
T = nx.Graph()
nx.add_cycle(T, [1, 2, 3, 4], weight=1)
T.add_edge(2, 4, weight=5)
cls.diamond_graph = T
def test_unweighted_diamond(self):
mcb = nx.minimum_cycle_basis(self.diamond_graph)
assert_basis_equal(mcb, [[2, 4, 1], [3, 4, 2]])
def test_weighted_diamond(self):
mcb = nx.minimum_cycle_basis(self.diamond_graph, weight="weight")
assert_basis_equal(mcb, [[2, 4, 1], [4, 3, 2, 1]])
def test_dimensionality(self):
# checks |MCB|=|E|-|V|+|NC|
ntrial = 10
for seed in range(1234, 1234 + ntrial):
rg = nx.erdos_renyi_graph(10, 0.3, seed=seed)
nnodes = rg.number_of_nodes()
nedges = rg.number_of_edges()
ncomp = nx.number_connected_components(rg)
mcb = nx.minimum_cycle_basis(rg)
assert len(mcb) == nedges - nnodes + ncomp
check_independent(mcb)
def test_complete_graph(self):
cg = nx.complete_graph(5)
mcb = nx.minimum_cycle_basis(cg)
assert all(len(cycle) == 3 for cycle in mcb)
check_independent(mcb)
def test_tree_graph(self):
tg = nx.balanced_tree(3, 3)
assert not nx.minimum_cycle_basis(tg)
def test_petersen_graph(self):
G = nx.petersen_graph()
mcb = list(nx.minimum_cycle_basis(G))
expected = [
[4, 9, 7, 5, 0],
[1, 2, 3, 4, 0],
[1, 6, 8, 5, 0],
[4, 3, 8, 5, 0],
[1, 6, 9, 4, 0],
[1, 2, 7, 5, 0],
]
assert len(mcb) == len(expected)
assert all(c in expected for c in mcb)
# check that order of the nodes is a path
for c in mcb:
assert all(G.has_edge(u, v) for u, v in nx.utils.pairwise(c, cyclic=True))
# check independence of the basis
check_independent(mcb)
def test_gh6787_variable_weighted_complete_graph(self):
N = 8
cg = nx.complete_graph(N)
cg.add_weighted_edges_from([(u, v, 9) for u, v in cg.edges])
cg.add_weighted_edges_from([(u, v, 1) for u, v in nx.cycle_graph(N).edges])
mcb = nx.minimum_cycle_basis(cg, weight="weight")
check_independent(mcb)
def test_gh6787_and_edge_attribute_names(self):
G = nx.cycle_graph(4)
G.add_weighted_edges_from([(0, 2, 10), (1, 3, 10)], weight="dist")
expected = [[1, 3, 0], [3, 2, 1, 0], [1, 2, 0]]
mcb = list(nx.minimum_cycle_basis(G, weight="dist"))
assert len(mcb) == len(expected)
assert all(c in expected for c in mcb)
# test not using a weight with weight attributes
expected = [[1, 3, 0], [1, 2, 0], [3, 2, 0]]
mcb = list(nx.minimum_cycle_basis(G))
assert len(mcb) == len(expected)
assert all(c in expected for c in mcb)
| TestMinimumCycleBasis |
python | optuna__optuna | optuna/samplers/_nsgaiii/_sampler.py | {
"start": 1100,
"end": 8767
} | class ____(BaseGASampler):
"""Multi-objective sampler using the NSGA-III algorithm.
NSGA-III stands for "Nondominated Sorting Genetic Algorithm III",
which is a modified version of NSGA-II for many objective optimization problem.
For further information about NSGA-III, please refer to the following papers:
- `An Evolutionary Many-Objective Optimization Algorithm Using Reference-Point-Based
Nondominated Sorting Approach, Part I: Solving Problems With Box Constraints
<https://doi.org/10.1109/TEVC.2013.2281535>`__
- `An Evolutionary Many-Objective Optimization Algorithm Using Reference-Point-Based
Nondominated Sorting Approach, Part II: Handling Constraints and Extending to an Adaptive
Approach
<https://doi.org/10.1109/TEVC.2013.2281534>`__
Args:
reference_points:
A 2 dimension ``numpy.ndarray`` with objective dimension columns. Represents
a list of reference points which is used to determine who to survive.
After non-dominated sort, who out of borderline front are going to survived is
determined according to how sparse the closest reference point of each individual is.
In the default setting the algorithm uses `uniformly` spread points to diversify the
result. It is also possible to reflect your `preferences` by giving an arbitrary set of
`target` points since the algorithm prioritizes individuals around reference points.
dividing_parameter:
A parameter to determine the density of default reference points. This parameter
determines how many divisions are made between reference points on each axis. The
smaller this value is, the less reference points you have. The default value is 3.
Note that this parameter is not used when ``reference_points`` is not :obj:`None`.
.. note::
Other parameters than ``reference_points`` and ``dividing_parameter`` are the same as
:class:`~optuna.samplers.NSGAIISampler`.
"""
def __init__(
self,
*,
population_size: int = 50,
mutation_prob: float | None = None,
crossover: BaseCrossover | None = None,
crossover_prob: float = 0.9,
swapping_prob: float = 0.5,
seed: int | None = None,
constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
reference_points: np.ndarray | None = None,
dividing_parameter: int = 3,
elite_population_selection_strategy: (
Callable[[Study, list[FrozenTrial]], list[FrozenTrial]] | None
) = None,
child_generation_strategy: (
Callable[[Study, dict[str, BaseDistribution], list[FrozenTrial]], dict[str, Any]]
| None
) = None,
after_trial_strategy: (
Callable[[Study, FrozenTrial, TrialState, Sequence[float] | None], None] | None
) = None,
) -> None:
# TODO(ohta): Reconsider the default value of each parameter.
if population_size < 2:
raise ValueError("`population_size` must be greater than or equal to 2.")
if crossover is None:
crossover = UniformCrossover(swapping_prob)
if not isinstance(crossover, BaseCrossover):
raise ValueError(
f"'{crossover}' is not a valid crossover."
" For valid crossovers see"
" https://optuna.readthedocs.io/en/stable/reference/samplers.html."
)
if population_size < crossover.n_parents:
raise ValueError(
f"Using {crossover},"
f" the population size should be greater than or equal to {crossover.n_parents}."
f" The specified `population_size` is {population_size}."
)
super().__init__(population_size=population_size)
self._random_sampler = RandomSampler(seed=seed)
self._rng = LazyRandomState(seed)
self._constraints_func = constraints_func
self._search_space = IntersectionSearchSpace()
self._elite_population_selection_strategy = (
elite_population_selection_strategy
or NSGAIIIElitePopulationSelectionStrategy(
population_size=population_size,
constraints_func=constraints_func,
reference_points=reference_points,
dividing_parameter=dividing_parameter,
rng=self._rng,
)
)
self._child_generation_strategy = (
child_generation_strategy
or NSGAIIChildGenerationStrategy(
crossover_prob=crossover_prob,
mutation_prob=mutation_prob,
swapping_prob=swapping_prob,
crossover=crossover,
constraints_func=constraints_func,
rng=self._rng,
)
)
self._after_trial_strategy = after_trial_strategy or NSGAIIAfterTrialStrategy(
constraints_func=constraints_func
)
def reseed_rng(self) -> None:
self._random_sampler.reseed_rng()
self._rng.rng.seed()
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> dict[str, BaseDistribution]:
search_space: dict[str, BaseDistribution] = {}
for name, distribution in self._search_space.calculate(study).items():
if distribution.single():
# The `untransform` method of `optuna._transform._SearchSpaceTransform`
# does not assume a single value,
# so single value objects are not sampled with the `sample_relative` method,
# but with the `sample_independent` method.
continue
search_space[name] = distribution
return search_space
def select_parent(self, study: Study, generation: int) -> list[FrozenTrial]:
return self._elite_population_selection_strategy(
study,
self.get_population(study, generation - 1)
+ self.get_parent_population(study, generation - 1),
)
def sample_relative(
self,
study: Study,
trial: FrozenTrial,
search_space: dict[str, BaseDistribution],
) -> dict[str, Any]:
generation = self.get_trial_generation(study, trial)
parent_population = self.get_parent_population(study, generation)
if len(parent_population) == 0:
return {}
return self._child_generation_strategy(study, search_space, parent_population)
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
# Following parameters are randomly sampled here.
# 1. A parameter in the initial population/first generation.
# 2. A parameter to mutate.
# 3. A parameter excluded from the intersection search space.
return self._random_sampler.sample_independent(
study, trial, param_name, param_distribution
)
def before_trial(self, study: Study, trial: FrozenTrial) -> None:
self._random_sampler.before_trial(study, trial)
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
assert state in [TrialState.COMPLETE, TrialState.FAIL, TrialState.PRUNED]
self._after_trial_strategy(study, trial, state, values)
self._random_sampler.after_trial(study, trial, state, values)
| NSGAIIISampler |
python | scikit-learn__scikit-learn | sklearn/tests/test_base.py | {
"start": 2549,
"end": 2715
} | class ____:
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
| NoEstimator |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 3869,
"end": 3915
} | class ____(GeoFuncMixin, Func):
pass
| GeoFunc |
python | walkccc__LeetCode | solutions/2930. Number of Strings Which Can Be Rearranged to Contain Substring/2930.py | {
"start": 0,
"end": 681
} | class ____:
def stringCount(self, n: int) -> int:
# There're three invalid conditions:
# a. count('l') == 0
# b. count('e') < 2
# c. count('t') == 0
#
# By Principle of Inclusion-Exclusion (PIE):
# ans = allCount - a - b - c + ab + ac + bc - abc
MOD = 1_000_000_007
allCount = pow(26, n, MOD)
a = pow(25, n, MOD)
b = pow(25, n, MOD)
c = pow(25, n, MOD) + n * pow(25, n - 1, MOD)
ab = pow(24, n, MOD) + n * pow(24, n - 1, MOD)
ac = pow(24, n, MOD)
bc = pow(24, n, MOD) + n * pow(24, n - 1, MOD)
abc = pow(23, n, MOD) + n * pow(23, n - 1, MOD)
return (allCount - a - b - c + ab + ac + bc - abc) % MOD
| Solution |
python | numba__numba | numba/tests/test_extending.py | {
"start": 3967,
"end": 10326
} | class ____(ConcreteTemplate):
key = "print_item"
cases = [signature(types.none, mydummy_type)]
@lower_builtin("print_item", MyDummyType)
def print_dummy(context, builder, sig, args):
[x] = args
pyapi = context.get_python_api(builder)
strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
pyapi.print_object(strobj)
pyapi.decref(strobj)
return context.get_dummy_value()
# -----------------------------------------------------------------------
# Define an overloaded function (combined API)
def where(cond, x, y):
raise NotImplementedError
def np_where(cond, x, y):
"""
Wrap np.where() to allow for keyword arguments
"""
return np.where(cond, x, y)
def call_where(cond, x, y):
return where(cond, y=y, x=x)
@overload(where)
def overload_where_arrays(cond, x, y):
"""
Implement where() for arrays.
"""
# Choose implementation based on argument types.
if isinstance(cond, types.Array):
if x.dtype != y.dtype:
raise errors.TypingError("x and y should have the same dtype")
# Array where() => return an array of the same shape
if all(ty.layout == "C" for ty in (cond, x, y)):
def where_impl(cond, x, y):
"""
Fast implementation for C-contiguous arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
"""
Generic implementation for other arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
return where_impl
# We can define another overload function for the same function, they
# will be tried in turn until one succeeds.
@overload(where)
def overload_where_scalars(cond, x, y):
"""
Implement where() for scalars.
"""
if not isinstance(cond, types.Array):
if x != y:
raise errors.TypingError("x and y should have the same type")
def where_impl(cond, x, y):
"""
Scalar where() => return a 0-dim array
"""
scal = x if cond else y
# Can't use full_like() on Numpy < 1.8
arr = np.empty_like(scal)
arr[()] = scal
return arr
return where_impl
# -----------------------------------------------------------------------
# Overload an already defined built-in function, extending it for new types.
@overload(len)
def overload_len_dummy(arg):
if isinstance(arg, MyDummyType):
def len_impl(arg):
return 13
return len_impl
@overload(operator.add)
def overload_add_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_add_impl(arg1, arg2):
return 42
return dummy_add_impl
@overload(operator.delitem)
def overload_dummy_delitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_delitem_impl(obj, idx):
print("del", obj, idx)
return dummy_delitem_impl
@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_getitem_impl(obj, idx):
return idx + 123
return dummy_getitem_impl
@overload(operator.setitem)
def overload_dummy_setitem(obj, idx, val):
if all(
[
isinstance(obj, MyDummyType),
isinstance(idx, types.Integer),
isinstance(val, types.Integer),
]
):
def dummy_setitem_impl(obj, idx, val):
print(idx, val)
return dummy_setitem_impl
def call_add_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_add_binop(arg1, arg2):
return arg1 + arg2
@overload(operator.iadd)
def overload_iadd_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_iadd_impl(arg1, arg2):
return 42
return dummy_iadd_impl
def call_iadd_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_iadd_binop(arg1, arg2):
arg1 += arg2
return arg1
def call_delitem(obj, idx):
del obj[idx]
def call_getitem(obj, idx):
return obj[idx]
def call_setitem(obj, idx, val):
obj[idx] = val
@overload_method(MyDummyType, "length")
def overload_method_length(arg):
def imp(arg):
return len(arg)
return imp
def cache_overload_method_usecase(x):
return x.length()
def call_func1_nullary():
return func1()
def call_func1_unary(x):
return func1(x)
def len_usecase(x):
return len(x)
def print_usecase(x):
print(x)
def getitem_usecase(x, key):
return x[key]
def npyufunc_usecase(x):
return np.cos(np.sin(x))
def get_data_usecase(x):
return x._data
def get_index_usecase(x):
return x._index
def is_monotonic_usecase(x):
return x.is_monotonic_increasing
def make_series_usecase(data, index):
return Series(data, index)
def clip_usecase(x, lo, hi):
return x.clip(lo, hi)
# -----------------------------------------------------------------------
def return_non_boxable():
return np
@overload(return_non_boxable)
def overload_return_non_boxable():
def imp():
return np
return imp
def non_boxable_ok_usecase(sz):
mod = return_non_boxable()
return mod.arange(sz)
def non_boxable_bad_usecase():
return return_non_boxable()
def mk_func_input(f):
pass
@infer_global(mk_func_input)
| PrintDummy |
python | allegroai__clearml | clearml/automation/controller.py | {
"start": 1235,
"end": 184560
} | class ____(object):
"""
Pipeline controller.
Pipeline is a DAG of base tasks, each task will be cloned (arguments changed as required), executed, and monitored.
The pipeline process (task) itself can be executed manually or by the clearml-agent services queue.
Notice: The pipeline controller lives as long as the pipeline itself is being executed.
"""
_tag = "pipeline"
_project_system_tags = ["pipeline", "hidden"]
_node_tag_prefix = "pipe:"
_step_pattern = r"\${[^}]*}"
_config_section = "Pipeline"
_state_artifact_name = "pipeline_state"
_args_section = "Args"
_pipeline_section = "pipeline"
_pipeline_step_ref = "pipeline"
_runtime_property_hash = "_pipeline_hash"
_relaunch_status_message = "Relaunching pipeline step..."
_reserved_pipeline_names = (_pipeline_step_ref,)
_task_project_lookup = {}
_clearml_job_class = ClearmlJob
_update_execution_plot_interval = 5.0 * 60
_update_progress_interval = 10.0
_monitor_node_interval = 5.0 * 60
_pipeline_as_sub_project_cached = None
_report_plot_execution_flow = dict(title="Pipeline", series="Execution Flow")
_report_plot_execution_details = dict(title="Pipeline Details", series="Execution Details")
_evaluated_return_values = {} # TID: pipeline_name
_add_to_evaluated_return_values = {} # TID: bool
_retries = {} # Node.name: int
_retries_callbacks = {} # Node.name: Callable[[PipelineController, PipelineController.Node, int], bool] # noqa
_status_change_callbacks = {} # Node.name: Callable[PipelineController, PipelineController.Node, str]
_final_failure = {} # Node.name: bool
_task_template_header = CreateFromFunction.default_task_template_header
_default_pipeline_version = "1.0.0"
_project_section = ".pipelines"
valid_job_status = [
"failed",
"cached",
"completed",
"aborted",
"queued",
"running",
"skipped",
"pending",
]
@attrs
class Node(object):
# pipeline step name
name = attrib(type=str)
# base Task ID to be cloned and launched
base_task_id = attrib(type=str, default=None)
# alternative to base_task_id, function creating a Task
task_factory_func = attrib(type=Callable, default=None)
# execution queue name to use
queue = attrib(type=str, default=None)
# list of parent DAG steps
parents = attrib(type=list, default=None)
# execution timeout limit
timeout = attrib(type=float, default=None)
# Task hyper-parameters to change
parameters = attrib(type=dict, default=None)
# Task configuration objects to change
configurations = attrib(type=dict, default=None)
# Task overrides to change
task_overrides = attrib(type=dict, default=None)
# The actual executed Task ID (None if not executed yet)
executed = attrib(type=str, default=None)
# The Node Task status (cached, aborted, etc.)
status = attrib(type=str, default="pending")
# If True cline the base_task_id, then execute the cloned Task
clone_task = attrib(type=bool, default=True)
# ClearMLJob object
job = attrib(type=ClearmlJob, default=None)
# task type (string)
job_type = attrib(type=str, default=None)
# job startup timestamp (epoch ts in seconds)
job_started = attrib(type=float, default=None)
# job startup timestamp (epoch ts in seconds)
job_ended = attrib(type=float, default=None)
# pipeline code configuration section name
job_code_section = attrib(type=str, default=None)
# if True, this step should be skipped
skip_job = attrib(type=bool, default=False)
# if True this pipeline step should be cached
cache_executed_step = attrib(type=bool, default=False)
# List of artifact names returned by the step
return_artifacts = attrib(type=list, default=None)
# List of metric title/series to monitor
monitor_metrics = attrib(type=list, default=None)
# List of artifact names to monitor
monitor_artifacts = attrib(type=list, default=None)
# List of models to monitor
monitor_models = attrib(type=list, default=None)
# The Docker image the node uses, specified at creation
explicit_docker_image = attrib(type=str, default=None)
# if True, recursively parse parameters in lists, dicts, or tuples
recursively_parse_parameters = attrib(type=bool, default=False)
# The default location for output models and other artifacts
output_uri = attrib(type=Union[bool, str], default=None)
# Specify whether to create the Task as a draft
draft = attrib(type=bool, default=False)
# continue_behaviour dict, for private use. used to initialize fields related to continuation behaviour
continue_behaviour = attrib(type=dict, default=None)
# if True, the pipeline continues even if the step failed
continue_on_fail = attrib(type=bool, default=False)
# if True, the pipeline continues even if the step was aborted
continue_on_abort = attrib(type=bool, default=False)
# if True, the children of aborted steps are skipped
skip_children_on_abort = attrib(type=bool, default=True)
# if True, the children of failed steps are skipped
skip_children_on_fail = attrib(type=bool, default=True)
# the stage of the step
stage = attrib(type=str, default=None)
def __attrs_post_init__(self) -> None:
    """Normalize container defaults and expand the `continue_behaviour` dict into individual flags."""
    # attrs fields cannot safely use mutable defaults, so None placeholders
    # are replaced here with fresh containers
    for list_field in ("parents", "return_artifacts", "monitor_metrics", "monitor_artifacts", "monitor_models"):
        if getattr(self, list_field) is None:
            setattr(self, list_field, [])
    for dict_field in ("parameters", "configurations", "task_overrides"):
        if getattr(self, dict_field) is None:
            setattr(self, dict_field, {})
    behaviour = self.continue_behaviour
    if behaviour is not None:
        # keys missing from the dict default to True, then the dict is consumed
        self.continue_on_fail = behaviour.get("continue_on_fail", True)
        self.continue_on_abort = behaviour.get("continue_on_abort", True)
        self.skip_children_on_fail = behaviour.get("skip_children_on_fail", True)
        self.skip_children_on_abort = behaviour.get("skip_children_on_abort", True)
        self.continue_behaviour = None
def copy(self) -> "PipelineController.Node":
    """
    Return a copy of this Node, excluding the runtime fields (`job`, `executed`).

    :return: new Node copy
    """
    excluded = ("name", "job", "executed", "task_factory_func")
    # deep-copy every remaining field so the clone is fully independent
    cloned_fields = {k: deepcopy(v) for k, v in self.__dict__.items() if k not in excluded}
    node_clone = PipelineController.Node(name=self.name, **cloned_fields)
    # the factory callable is shared by reference, never deep-copied
    node_clone.task_factory_func = self.task_factory_func
    return node_clone
def set_job_ended(self) -> None:
    """Record the job end timestamp (start time + active duration), once."""
    if self.job_ended:
        # already recorded, keep the first value
        return
    # noinspection PyBroadException
    try:
        task = self.job.task
        task.reload()
        self.job_ended = self.job_started + task.data.active_duration
    except Exception:
        # best effort: leave job_ended unset if the task cannot be queried
        pass
def set_job_started(self) -> None:
    """Record the job start timestamp (epoch seconds), once."""
    if self.job_started:
        # already recorded
        return
    # noinspection PyBroadException
    try:
        started = self.job.task.data.started
        self.job_started = started.timestamp()
    except Exception:
        # best effort: the task may not have started yet
        pass
def __init__(
    self,
    name: str,
    project: str,
    version: Optional[str] = None,
    pool_frequency: float = 0.2,
    add_pipeline_tags: bool = False,
    target_project: Optional[Union[str, bool]] = True,
    auto_version_bump: Optional[bool] = None,
    abort_on_failure: bool = False,
    add_run_number: bool = True,
    retry_on_failure: Optional[
        Union[
            int,
            Callable[["PipelineController", "PipelineController.Node", int], bool],
        ]
    ] = None,  # noqa
    docker: Optional[str] = None,
    docker_args: Optional[str] = None,
    docker_bash_setup_script: Optional[str] = None,
    packages: Optional[Union[bool, str, Sequence[str]]] = None,
    repo: Optional[str] = None,
    repo_branch: Optional[str] = None,
    repo_commit: Optional[str] = None,
    always_create_from_code: bool = True,
    artifact_serialization_function: Optional[Callable[[Any], Union[bytes, bytearray]]] = None,
    artifact_deserialization_function: Optional[Callable[[bytes], Any]] = None,
    output_uri: Optional[Union[str, bool]] = None,
    skip_global_imports: bool = False,
    working_dir: Optional[str] = None,
    enable_local_imports: bool = True,
) -> None:
    """
    Create a new pipeline controller. The newly created object will launch and monitor the new experiments.

    :param name: Provide pipeline name (if main Task exists it overrides its name)
    :param project: Provide project storing the pipeline (if main Task exists it overrides its project)
    :param version: Pipeline version. This version allows to uniquely identify the pipeline
        template execution. Examples for semantic versions: version='1.0.1' , version='23', version='1.2'.
        If not set, find the latest version of the pipeline and increment it. If no such version is found,
        default to '1.0.0'
    :param float pool_frequency: The pooling frequency (in minutes) for monitoring experiments / states.
    :param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
        steps (Tasks) created by this pipeline.
    :param str target_project: If provided, all pipeline steps are cloned into the target project.
        If True, pipeline steps are stored into the pipeline project
    :param bool auto_version_bump: (Deprecated) If True, if the same pipeline version already exists
        (with any difference from the current one), the current pipeline version will be bumped to a new version
        version bump examples: 1.0.0 -> 1.0.1 , 1.2 -> 1.3, 10 -> 11 etc.
    :param bool abort_on_failure: If False (default), failed pipeline steps will not cause the pipeline
        to stop immediately, instead any step that is not connected (or indirectly connected) to the failed step,
        will still be executed. Nonetheless the pipeline itself will be marked failed, unless the failed step
        was specifically defined with "continue_on_fail=True".
        If True, any failed step will cause the pipeline to immediately abort, stop all running steps,
        and mark the pipeline as failed.
    :param add_run_number: If True (default), add the run number of the pipeline to the pipeline name.
        Example, the second time we launch the pipeline "best pipeline", we rename it to "best pipeline #2"
    :param retry_on_failure: Integer (number of retries) or Callback function that returns True to allow a retry

        - Integer: In case of node failure, retry the node the number of times indicated by this parameter.
        - Callable: A function called on node failure. Takes as parameters:
          the PipelineController instance, the PipelineController.Node that failed and an int
          representing the number of previous retries for the node that failed.
          The function must return ``True`` if the node should be retried and ``False`` otherwise.
          If True, the node will be re-queued and the number of retries left will be decremented by 1.
          By default, if this callback is not specified, the function will be retried the number of
          times indicated by `retry_on_failure`.

          .. code-block:: py

              def example_retry_on_failure_callback(pipeline, node, retries):
                  print(node.name, ' failed')
                  # allow up to 5 retries (total of 6 runs)
                  return retries < 5

    :param docker: Select the docker image to be executed in by the remote session
    :param docker_args: Add docker arguments, pass a single string
    :param docker_bash_setup_script: Add bash script to be executed
        inside the docker before setting up the Task's environment
    :param packages: Manually specify a list of required packages or a local requirements.txt file.
        Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
        If not provided, packages are automatically added.
        Use `False` to install requirements from "requirements.txt" inside your git repository
    :param repo: Optional, specify a repository to attach to the pipeline controller, when remotely executing.
        Allow users to execute the controller inside the specified repository, enabling them to load modules/script
        from the repository. Notice the execution work directory will be the repository root folder.
        Supports both git repo url link, and local repository path (automatically converted into the remote
        git/commit as is currently checkout).
        Example remote url: 'https://github.com/user/repo.git'
        Example local repo copy: './repo' -> will automatically store the remote
        repo url and commit ID based on the locally cloned copy
        Use empty string ("") to disable any repository auto-detection
    :param repo_branch: Optional, specify the remote repository branch (Ignored, if local repo path is used)
    :param repo_commit: Optional, specify the repository commit ID (Ignored, if local repo path is used)
    :param always_create_from_code: If True (default) the pipeline is always constructed from code,
        if False, pipeline is generated from pipeline configuration section on the pipeline Task itsef.
        this allows to edit (also add/remove) pipeline steps without changing the original codebase
    :param artifact_serialization_function: A serialization function that takes one
        parameter of any type which is the object to be serialized. The function should return
        a `bytes` or `bytearray` object, which represents the serialized object. All parameter/return
        artifacts uploaded by the pipeline will be serialized using this function.
        All relevant imports must be done in this function. For example:

        .. code-block:: py

            def serialize(obj):
                import dill
                return dill.dumps(obj)

    :param artifact_deserialization_function: A deserialization function that takes one parameter of type `bytes`,
        which represents the serialized object. This function should return the deserialized object.
        All parameter/return artifacts fetched by the pipeline will be deserialized using this function.
        All relevant imports must be done in this function. For example:

        .. code-block:: py

            def deserialize(bytes_):
                import dill
                return dill.loads(bytes_)

    :param output_uri: The storage / output url for this pipeline. This is the default location for output
        models and other artifacts. Check Task.init reference docs for more info (output_uri is a parameter).
        The `output_uri` of this pipeline's steps will default to this value.
    :param skip_global_imports: If True, global imports will not be included in the steps' execution when creating
        the steps from a functions, otherwise all global imports will be automatically imported in a safe manner at
        the beginning of each step’s execution. Default is False
    :param working_dir: Working directory to launch the pipeline from.
    :param enable_local_imports: If True, allow pipeline steps to import from local files
        by appending to the PYTHONPATH of each step the directory the pipeline controller
        script resides in (sys.path[0]).
        If False, the directory won't be appended to PYTHONPATH. Default is True.
        Ignored while running remotely.
    """
    # deprecated flag: warn once and then ignore it entirely
    if auto_version_bump is not None:
        warnings.warn(
            "PipelineController.auto_version_bump is deprecated. It will be ignored",
            DeprecationWarning,
        )
    self._nodes = {}
    self._running_nodes = []
    self._start_time = None
    self._pipeline_time_limit = None
    self._default_execution_queue = None
    self._always_create_from_code = bool(always_create_from_code)
    self._version = str(version).strip() if version else None
    # fail fast on a non-semantic user-supplied version string
    if self._version and not Version.is_valid_version_string(self._version):
        raise ValueError("Setting non-semantic pipeline version '{}'".format(self._version))
    # pool_frequency is given in minutes, stored internally in seconds
    self._pool_frequency = pool_frequency * 60.0
    self._thread = None
    self._pipeline_args = dict()
    self._pipeline_args_desc = dict()
    self._pipeline_args_type = dict()
    self._args_map = dict()
    self._stop_event = None
    self._experiment_created_cb = None
    self._experiment_completed_cb = None
    self._pre_step_callbacks = {}
    self._post_step_callbacks = {}
    self._target_project = target_project
    self._add_pipeline_tags = add_pipeline_tags
    # reuse the current Task if the controller is constructed inside one
    self._task = Task.current_task()
    self._step_ref_pattern = re.compile(self._step_pattern)
    self._reporting_lock = RLock()
    self._pipeline_task_status_failed = None
    self._mock_execution = False  # used for nested pipelines (eager execution)
    self._last_progress_update_time = 0
    self._artifact_serialization_function = artifact_serialization_function
    self._artifact_deserialization_function = artifact_deserialization_function
    self._skip_global_imports = skip_global_imports
    self._enable_local_imports = enable_local_imports
    if not self._task:
        # no active Task: create a dedicated controller Task for this pipeline
        pipeline_project_args = self._create_pipeline_project_args(name, project)

        # if user disabled the auto-repo, we force local script storage (repo="" or repo=False)
        set_force_local_repo = False
        if Task.running_locally() and repo is not None and not repo:
            Task.force_store_standalone_script(force=True)
            set_force_local_repo = True

        self._task = Task.init(
            project_name=pipeline_project_args["project_name"],
            task_name=pipeline_project_args["task_name"],
            task_type=Task.TaskTypes.controller,
            auto_resource_monitoring=False,
            reuse_last_task_id=False,
        )

        # if user disabled the auto-repo, set it back to False (just in case)
        if set_force_local_repo:
            # wait for repository detection to finish before restoring the flag
            # noinspection PyProtectedMember
            self._task._wait_for_repo_detection(timeout=300.0)
            Task.force_store_standalone_script(force=False)

        self._create_pipeline_projects(
            task=self._task,
            parent_project=pipeline_project_args["parent_project"],
            project_name=pipeline_project_args["project_name"],
        )
        self._task.set_system_tags((self._task.get_system_tags() or []) + [self._tag])
    if output_uri is not None:
        self._task.output_uri = output_uri
    self._output_uri = output_uri
    self._task.set_base_docker(
        docker_image=docker,
        docker_arguments=docker_args,
        docker_setup_bash_script=docker_bash_setup_script,
    )
    self._task.set_packages(packages)
    self._task.set_script(
        repository=repo,
        branch=repo_branch,
        commit=repo_commit,
        working_dir=working_dir,
    )
    self._auto_connect_task = bool(self._task)
    # make sure we add to the main Task the pipeline tag
    if self._task and not self._pipeline_as_sub_project():
        self._task.add_tags([self._tag])

    self._monitored_nodes: Dict[str, dict] = {}
    self._abort_running_steps_on_failure = abort_on_failure
    # int retry_on_failure sets the default retry budget; callable installs a custom policy
    self._def_max_retry_on_failure = retry_on_failure if isinstance(retry_on_failure, int) else 0
    self._retry_on_failure_callback = (
        retry_on_failure if callable(retry_on_failure) else self._default_retry_on_failure_callback
    )

    # add direct link to the pipeline page
    if self._pipeline_as_sub_project() and self._task:
        if add_run_number and self._task.running_locally():
            self._add_pipeline_name_run_number(self._task)
        # noinspection PyProtectedMember
        self._task.get_logger().report_text(
            "ClearML pipeline page: {}".format(
                "{}/pipelines/{}/experiments/{}".format(
                    self._task._get_app_server(),
                    self._task.project if self._task.project is not None else "*",
                    self._task.id,
                )
            )
        )
@classmethod
def _pipeline_as_sub_project(cls) -> bool:
    """Return True when the server supports pipelines as sub-projects (API >= 2.17); cached per class."""
    if cls._pipeline_as_sub_project_cached is None:
        supported = Session.check_min_api_server_version("2.17")
        cls._pipeline_as_sub_project_cached = bool(supported)
    return cls._pipeline_as_sub_project_cached
def set_default_execution_queue(self, default_execution_queue: Optional[str]) -> None:
    """
    Set the default execution queue used when a pipeline step does not specify one.

    :param default_execution_queue: The execution queue to use if no execution queue is provided
    """
    if default_execution_queue:
        self._default_execution_queue = str(default_execution_queue)
    else:
        # falsy values (None, empty string) clear the default queue
        self._default_execution_queue = None
def set_pipeline_execution_time_limit(self, max_execution_minutes: Optional[float]) -> None:
    """
    Set maximum execution time (minutes) for the entire pipeline. Pass None or 0 to disable execution time limit.

    :param float max_execution_minutes: The maximum time (minutes) for the entire pipeline process. The
        default is ``None``, indicating no time limit.
    """
    if max_execution_minutes:
        # convert minutes to seconds for internal bookkeeping
        self._pipeline_time_limit = max_execution_minutes * 60.0
    else:
        self._pipeline_time_limit = None
def add_step(
    self,
    name: str,
    base_task_id: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parameter_override: Optional[Mapping[str, Any]] = None,
    configuration_overrides: Optional[Mapping[str, Union[str, Mapping]]] = None,
    task_overrides: Optional[Mapping[str, Any]] = None,
    execution_queue: Optional[str] = None,
    monitor_metrics: Optional[List[Union[Tuple[str, str], Tuple]]] = None,
    monitor_artifacts: Optional[List[Union[str, Tuple[str, str]]]] = None,
    monitor_models: Optional[List[Union[str, Tuple[str, str]]]] = None,
    time_limit: Optional[float] = None,
    base_task_project: Optional[str] = None,
    base_task_name: Optional[str] = None,
    clone_base_task: bool = True,
    continue_on_fail: bool = False,
    pre_execute_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node", dict], bool]
    ] = None,  # noqa
    post_execute_callback: Optional[Callable[["PipelineController", "PipelineController.Node"], None]] = None,
    # noqa
    cache_executed_step: bool = False,
    base_task_factory: Optional[Callable[["PipelineController.Node"], Task]] = None,
    retry_on_failure: Optional[
        Union[
            int,
            Callable[["PipelineController", "PipelineController.Node", int], bool],
        ]
    ] = None,  # noqa
    status_change_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node", str], None]
    ] = None,  # noqa
    recursively_parse_parameters: bool = False,
    output_uri: Optional[Union[str, bool]] = None,
    continue_behaviour: Optional[dict] = None,
    stage: Optional[str] = None
) -> bool:
    """
    Add a step to the pipeline execution DAG.
    Each step must have a unique name (this name will later be used to address the step)

    :param name: Unique of the step. For example `stage1`
    :param base_task_id: The Task ID to use for the step. Each time the step is executed,
        the base Task is cloned, then the cloned task will be sent for execution.
    :param parents: Optional list of parent nodes in the DAG.
        The current step in the pipeline will be sent for execution only after all the parent nodes
        have been executed successfully.
    :param parameter_override: Optional parameter overriding dictionary.
        The dict values can reference a previously executed step using the following form ``'${step_name}'``. Examples:

        - Artifact access ``parameter_override={'Args/input_file': '${<step_name>.artifacts.<artifact_name>.url}' }``
        - Model access (last model used) ``parameter_override={'Args/input_file': '${<step_name>.models.output.-1.url}' }``
        - Parameter access ``parameter_override={'Args/input_file': '${<step_name>.parameters.Args/input_file}' }``
        - Pipeline Task argument (see `Pipeline.add_parameter`) ``parameter_override={'Args/input_file': '${pipeline.<pipeline_parameter>}' }``
        - Task ID ``parameter_override={'Args/input_file': '${stage3.id}' }``
    :param recursively_parse_parameters: If True, recursively parse parameters from parameter_override in lists, dicts, or tuples.
        Example:

        - ``parameter_override={'Args/input_file': ['${<step_name>.artifacts.<artifact_name>.url}', 'file2.txt']}`` will be correctly parsed.
        - ``parameter_override={'Args/input_file': ('${<step_name_1>.parameters.Args/input_file}', '${<step_name_2>.parameters.Args/input_file}')}`` will be correctly parsed.
    :param configuration_overrides: Optional, override Task configuration objects.
        Expected dictionary of configuration object name and configuration object content.
        Examples:

        - ``{'General': dict(key='value')}``
        - ``{'General': 'configuration file content'}``
        - ``{'OmegaConf': YAML.dumps(full_hydra_dict)}``
    :param task_overrides: Optional task section overriding dictionary.
        The dict values can reference a previously executed step using the following form ``'${step_name}'``. Examples:

        - Get the latest commit from a specific branch ``task_overrides={'script.version_num': '', 'script.branch': 'main'}``
        - Match git repository branch to a previous step ``task_overrides={'script.branch': '${stage1.script.branch}', 'script.version_num': ''}``
        - Change container image ``task_overrides={'container.image': 'nvidia/cuda:11.6.0-devel-ubuntu20.04', 'container.arguments': '--ipc=host'}``
        - Match container image to a previous step ``task_overrides={'container.image': '${stage1.container.image}'}``
        - Reset requirements (the agent will use the "requirements.txt" inside the repo) ``task_overrides={'script.requirements.pip': ""}``
    :param execution_queue: Optional, the queue to use for executing this specific step.
        If not provided, the task will be sent to the default execution queue, as defined on the class
    :param monitor_metrics: Optional, log the step's metrics on the pipeline Task.
        Format is a list of pairs metric (title, series) to log: ``[(step_metric_title, step_metric_series), ]``.
        For example: ``[('test', 'accuracy'), ]``.
        Or a list of tuple pairs, to specify a different target metric for to use on the pipeline Task:
        ``[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]``.
        For example: ``[[('test', 'accuracy'), ('model', 'accuracy')], ]``
    :param monitor_artifacts: Optional, log the step's artifacts on the pipeline Task.
        Provided a list of artifact names existing on the step's Task, they will also appear on the Pipeline itself.
        Example: ``[('processed_data', 'final_processed_data'), ]``.
        Alternatively user can also provide a list of artifacts to monitor
        (target artifact name will be the same as original artifact name).
        Example: ``['processed_data', ]``
    :param monitor_models: Optional, log the step's output models on the pipeline Task.
        Provided a list of model names existing on the step's Task, they will also appear on the Pipeline itself.
        Example: ``[('model_weights', 'final_model_weights'), ]``.
        Alternatively user can also provide a list of models to monitor
        (target models name will be the same as original model).
        Example: ``['model_weights', ]``.
        To select the latest (lexicographic) model use "model_*", or the last created model with just "*".
        Example: ``['model_weights_*', ]``
    :param time_limit: Default None, no time limit.
        Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
    :param base_task_project: If base_task_id is not given,
        use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
    :param base_task_name: If base_task_id is not given,
        use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
    :param clone_base_task: If True (default), the pipeline will clone the base task, and modify/enqueue
        the cloned Task. If False, the base-task is used directly, notice it has to be in draft-mode (created).
    :param continue_on_fail: (Deprecated, use `continue_behaviour` instead).
        If True, failed step will not cause the pipeline to stop
        (or marked as failed). Notice, that steps that are connected (or indirectly connected)
        to the failed step will be skipped. Defaults to False
    :param pre_execute_callback: Callback function, called when the step (Task) is created
        and before it is sent for execution. Allows a user to modify the Task before launch.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        ``parameters`` are the configuration arguments passed to the ClearmlJob.

        If the callback returned value is `False`,
        the Node is skipped and so is any node in the DAG that relies on this node.

        Notice the `parameters` are already parsed,
        e.g. ``${step1.parameters.Args/param}`` is replaced with relevant value.

        .. code-block:: py

            def step_created_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
                parameters,  # type: dict
            ):
                pass

    :param post_execute_callback: Callback function, called when a step (Task) is completed
        and other jobs are executed. Allows a user to modify the Task status after completion.

        .. code-block:: py

            def step_completed_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
            ):
                pass

    :param cache_executed_step: If True, before launching the new step,
        after updating with the latest configuration, check if an exact Task with the same parameter/code
        was already executed. If it was found, use it instead of launching a new Task.
        Default: False, a new cloned copy of base_task is always used.
        Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
        If `clone_base_task` is False there is no cloning, hence the base_task is used.
    :param base_task_factory: Optional, instead of providing a pre-existing Task,
        provide a Callable function to create the Task (returns Task object)
    :param retry_on_failure: Integer (number of retries) or Callback function that returns True to allow a retry

        - Integer: In case of node failure, retry the node the number of times indicated by this parameter.
        - Callable: A function called on node failure. Takes as parameters:
          the PipelineController instance, the PipelineController.Node that failed and an int
          representing the number of previous retries for the node that failed.
          The function must return ``True`` if the node should be retried and ``False`` otherwise.
          If True, the node will be re-queued and the number of retries left will be decremented by 1.
          By default, if this callback is not specified, the function will be retried the number of
          times indicated by `retry_on_failure`.

          .. code-block:: py

              def example_retry_on_failure_callback(pipeline, node, retries):
                  print(node.name, ' failed')
                  # allow up to 5 retries (total of 6 runs)
                  return retries < 5

    :param status_change_callback: Callback function, called when the status of a step (Task) changes.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        The signature of the function must look the following way:

        .. code-block:: py

            def status_change_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
                previous_status  # type: str
            ):
                pass

    :param output_uri: The storage / output url for this step. This is the default location for output
        models and other artifacts. Check Task.init reference docs for more info (output_uri is a parameter).
    :param continue_behaviour: Controls whether the pipeline will continue running after a step failed/was aborted.
        Different behaviours can be set using a dictionary of boolean options. Supported options are:

        - continue_on_fail - If True, the pipeline will continue even if the step failed.
          If False, the pipeline will stop
        - continue_on_abort - If True, the pipeline will continue even if the step was aborted.
          If False, the pipeline will stop
        - skip_children_on_fail - If True, the children of this step will be skipped if it failed.
          If False, the children will run even if this step failed.
          Any parameters passed from the failed step to its children will default to None
        - skip_children_on_abort - If True, the children of this step will be skipped if it was aborted.
          If False, the children will run even if this step was aborted.
          Any parameters passed from the failed step to its children will default to None
        - If the keys are not present in the dictionary, their values will default to True
    :param stage: Name of the stage. This parameter enables pipeline step grouping into stages

    :return: True if successful
    """
    if continue_on_fail:
        warnings.warn(
            "`continue_on_fail` is deprecated. Use `continue_behaviour` instead",
            DeprecationWarning,
        )

    # always store callback functions (even when running remotely)
    if pre_execute_callback:
        self._pre_step_callbacks[name] = pre_execute_callback
    if post_execute_callback:
        self._post_step_callbacks[name] = post_execute_callback

    # raises if the step name is invalid or already taken
    self._verify_node_name(name)

    if not base_task_factory and not base_task_id:
        # resolve the base Task from project/name when no explicit ID or factory was given
        if not base_task_project or not base_task_name:
            raise ValueError("Either base_task_id or base_task_project/base_task_name must be provided")
        base_task = Task.get_task(
            project_name=base_task_project,
            task_name=base_task_name,
            allow_archived=True,
            task_filter=dict(
                status=[
                    str(Task.TaskStatusEnum.created),
                    str(Task.TaskStatusEnum.queued),
                    str(Task.TaskStatusEnum.in_progress),
                    str(Task.TaskStatusEnum.published),
                    str(Task.TaskStatusEnum.stopped),
                    str(Task.TaskStatusEnum.completed),
                    str(Task.TaskStatusEnum.closed),
                ],
            ),
        )
        if not base_task:
            raise ValueError(
                "Could not find base_task_project={} base_task_name={}".format(base_task_project, base_task_name)
            )
        # archived matches are usable, but warn the user about them
        if Task.archived_tag in base_task.get_system_tags():
            LoggerRoot.get_base_logger().warning(
                "Found base_task_project={} base_task_name={} but it is archived".format(
                    base_task_project, base_task_name
                )
            )
        base_task_id = base_task.id

    if configuration_overrides is not None:
        # verify we have a dict or a string on all values
        if not isinstance(configuration_overrides, dict) or not all(
            isinstance(v, (str, dict)) for v in configuration_overrides.values()
        ):
            raise ValueError(
                "configuration_overrides must be a dictionary, with all values "
                "either dicts or strings, got '{}' instead".format(configuration_overrides)
            )

    if task_overrides:
        # nested override keys are flattened into dot notation (e.g. "script.branch")
        task_overrides = flatten_dictionary(task_overrides, sep=".")

    self._nodes[name] = self.Node(
        name=name,
        base_task_id=base_task_id,
        parents=parents or [],
        queue=execution_queue,
        timeout=time_limit,
        parameters=parameter_override or {},
        recursively_parse_parameters=recursively_parse_parameters,
        configurations=configuration_overrides,
        clone_task=clone_base_task,
        task_overrides=task_overrides,
        cache_executed_step=cache_executed_step,
        continue_on_fail=continue_on_fail,
        task_factory_func=base_task_factory,
        monitor_metrics=monitor_metrics or [],
        monitor_artifacts=monitor_artifacts or [],
        monitor_models=monitor_models or [],
        output_uri=self._output_uri if output_uri is None else output_uri,
        continue_behaviour=continue_behaviour,
        stage=stage
    )
    self._retries[name] = 0
    # per-step retry policy: callable -> custom policy, int -> bounded default
    # retries, otherwise fall back to the pipeline-level retry callback
    self._retries_callbacks[name] = (
        retry_on_failure
        if callable(retry_on_failure)
        else (
            functools.partial(
                self._default_retry_on_failure_callback,
                max_retries=retry_on_failure,
            )
            if isinstance(retry_on_failure, int)
            else self._retry_on_failure_callback
        )
    )
    if status_change_callback:
        self._status_change_callbacks[name] = status_change_callback

    # when executing remotely, refresh the pipeline DAG plot after each added step
    if self._task and not self._task.running_locally():
        self.update_execution_plot()

    return True
def add_function_step(
self,
name: str,
function: Callable,
function_kwargs: Optional[Dict[str, Any]] = None,
function_return: Optional[List[str]] = None,
project_name: Optional[str] = None,
task_name: Optional[str] = None,
task_type: Optional[str] = None,
auto_connect_frameworks: Optional[dict] = None,
auto_connect_arg_parser: Optional[dict] = None,
packages: Optional[Union[bool, str, Sequence[str]]] = None,
repo: Optional[str] = None,
repo_branch: Optional[str] = None,
repo_commit: Optional[str] = None,
helper_functions: Optional[Sequence[Callable]] = None,
docker: Optional[str] = None,
docker_args: Optional[str] = None,
docker_bash_setup_script: Optional[str] = None,
parents: Optional[Sequence[str]] = None,
execution_queue: Optional[str] = None,
monitor_metrics: Optional[List[Tuple]] = None,
monitor_artifacts: Optional[List[Union[str, Tuple]]] = None,
monitor_models: Optional[List[Union[str, Tuple]]] = None,
time_limit: Optional[float] = None,
continue_on_fail: bool = False,
pre_execute_callback: Optional[
Callable[["PipelineController", "PipelineController.Node", dict], bool]
] = None, # noqa
post_execute_callback: Optional[Callable[["PipelineController", "PipelineController.Node"], None]] = None,
# noqa
cache_executed_step: bool = False,
retry_on_failure: Optional[
Union[
int,
Callable[["PipelineController", "PipelineController.Node", int], bool],
]
] = None, # noqa
status_change_callback: Optional[
Callable[["PipelineController", "PipelineController.Node", str], None]
] = None, # noqa
tags: Optional[Union[str, Sequence[str]]] = None,
output_uri: Optional[Union[str, bool]] = None,
draft: Optional[bool] = False,
working_dir: Optional[str] = None,
continue_behaviour: Optional[dict] = None,
stage: Optional[str] = None
) -> bool:
"""
Create a Task from a function, including wrapping the function input arguments
into the hyper-parameter section as kwargs, and storing function results as named artifacts
Example:
.. code-block:: py
def mock_func(a=6, b=9):
c = a*b
print(a, b, c)
return c, c**2
create_task_from_function(mock_func, function_return=['mul', 'square'])
Example arguments from other Tasks (artifact):
.. code-block:: py
def mock_func(matrix_np):
c = matrix_np*matrix_np
print(matrix_np, c)
return c
create_task_from_function(
mock_func,
function_kwargs={'matrix_np': 'aabb1122.previous_matrix'},
function_return=['square_matrix']
)
:param name: Unique of the step. For example `stage1`
:param function: A global function to convert into a standalone Task
:param function_kwargs: Optional, provide subset of function arguments and default values to expose.
If not provided automatically take all function arguments & defaults
Optional, pass input arguments to the function from other Tasks' output artifact.
Example argument named `numpy_matrix` from Task ID `aabbcc` artifact name `answer`:
``{'numpy_matrix': 'aabbcc.answer'}``
:param function_return: Provide a list of names for all the results.
If not provided, no results will be stored as artifacts.
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task, if not provided use `name` argument.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param auto_connect_frameworks: Control the frameworks auto connect, see `Task.init` auto_connect_frameworks
:param auto_connect_arg_parser: Control the ArgParser auto connect, see `Task.init` auto_connect_arg_parser
:param packages: Manually specify a list of required packages or a local requirements.txt file.
Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
If not provided, packages are automatically added based on the imports used in the function.
Use `False` to install requirements from "requirements.txt" inside your git repository
:param repo: Optional, specify a repository to attach to the function, when remotely executing.
Allow users to execute the function inside the specified repository, enabling to load modules/script
from a repository Notice the execution work directory will be the repository root folder.
Supports both git repo url link, and local repository path.
Example remote url: 'https://github.com/user/repo.git'
Example local repo copy: './repo' -> will automatically store the remote
repo url and commit ID based on the locally cloned copy
:param repo_branch: Optional, specify the remote repository branch (Ignored, if local repo path is used)
:param repo_commit: Optional, specify the repository commit ID (Ignored, if local repo path is used)
:param helper_functions: Optional, a list of helper functions to make available
for the standalone function Task.
:param docker: Select the docker image to be executed in by the remote session
:param docker_args: Add docker arguments, pass a single string
:param docker_bash_setup_script: Add bash script to be executed
inside the docker before setting up the Task's environment
:param parents: Optional list of parent nodes in the DAG.
The current step in the pipeline will be sent for execution only after all the parent nodes
have been executed successfully.
:param execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the default execution queue, as defined on the class
:param monitor_metrics: Optional, log the step's metrics on the pipeline Task.
Format is a list of pairs metric (title, series) to log: ``[(step_metric_title, step_metric_series), ]``.
For example: ``[('test', 'accuracy'), ]``.
Or a list of tuple pairs, to specify a different target metric for to use on the pipeline Task:
``[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]``.
For example: ``[[('test', 'accuracy'), ('model', 'accuracy')], ]``
:param monitor_artifacts: Optional, log the step's artifacts on the pipeline Task.
Provided a list of artifact names existing on the step's Task, they will also appear on the Pipeline itself.
Example: ``[('processed_data', 'final_processed_data'), ]``.
Alternatively user can also provide a list of artifacts to monitor
(target artifact name will be the same as original artifact name).
Example: ``['processed_data', ]``
:param monitor_models: Optional, log the step's output models on the pipeline Task.
Provided a list of model names existing on the step's Task, they will also appear on the Pipeline itself.
Example: ``[('model_weights', 'final_model_weights'), ]``.
Alternatively user can also provide a list of models to monitor
(target models name will be the same as original model).
Example: ``['model_weights', ]``.
To select the latest (lexicographic) model use "model_*", or the last created model with just "*".
Example: ``['model_weights_*', ]``
:param time_limit: Default None, no time limit.
Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
:param continue_on_fail: (Deprecated, use `continue_behaviour` instead).
If True, failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
to the failed step will be skipped. Defaults to False
:param pre_execute_callback: Callback function, called when the step (Task) is created
and before it is sent for execution. Allows a user to modify the Task before launch.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
``parameters`` are the configuration arguments passed to the ClearmlJob.
If the callback returned value is `False`,
the Node is skipped and so is any node in the DAG that relies on this node.
Notice the `parameters` are already parsed,
e.g. ``${step1.parameters.Args/param}`` is replaced with relevant value.
.. code-block:: py
def step_created_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
parameters, # type: dict
):
pass
:param post_execute_callback: Callback function, called when a step (Task) is completed
and other jobs are executed. Allows a user to modify the Task status after completion.
.. code-block:: py
def step_completed_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
):
pass
:param cache_executed_step: If True, before launching the new step,
after updating with the latest configuration, check if an exact Task with the same parameter/code
was already executed. If it was found, use it instead of launching a new Task.
Default: False, a new cloned copy of base_task is always used.
Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
:param retry_on_failure: Integer (number of retries) or Callback function that returns True to allow a retry
- Integer: In case of node failure, retry the node the number of times indicated by this parameter.
- Callable: A function called on node failure. Takes as parameters:
the PipelineController instance, the PipelineController.Node that failed and an int
representing the number of previous retries for the node that failed.
The function must return ``True`` if the node should be retried and ``False`` otherwise.
If True, the node will be re-queued and the number of retries left will be decremented by 1.
By default, if this callback is not specified, the function will be retried the number of
times indicated by `retry_on_failure`.
.. code-block:: py
def example_retry_on_failure_callback(pipeline, node, retries):
print(node.name, ' failed')
# allow up to 5 retries (total of 6 runs)
return retries < 5
:param status_change_callback: Callback function, called when the status of a step (Task) changes.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
The signature of the function must look the following way:
.. code-block:: py
def status_change_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
previous_status # type: str
):
pass
:param tags: A list of tags for the specific pipeline step.
When executing a Pipeline remotely
(i.e. launching the pipeline from the UI/enqueuing it), this method has no effect.
:param output_uri: The storage / output url for this step. This is the default location for output
models and other artifacts. Check Task.init reference docs for more info (output_uri is a parameter).
:param draft: (default False). If True, the Task will be created as a draft task.
:param working_dir: Working directory to launch the script from.
:param continue_behaviour: Controls whether the pipeline will continue running after a step failed/was aborted.
Different behaviours can be set using a dictionary of boolean options. Supported options are:
- continue_on_fail - If True, the pipeline will continue even if the step failed.
If False, the pipeline will stop
- continue_on_abort - If True, the pipeline will continue even if the step was aborted.
If False, the pipeline will stop
- skip_children_on_fail - If True, the children of this step will be skipped if it failed.
If False, the children will run even if this step failed. Any parameters passed from the failed step to its
children will default to None
- skip_children_on_abort - If True, the children of this step will be skipped if it was aborted.
If False, the children will run even if this step was aborted.
Any parameters passed from the failed step to its children will default to None
- If the keys are not present in the dictionary, their values will default to True
:param stage: Name of the stage. This parameter enables pipeline step grouping into stages
:return: True if successful
"""
if continue_on_fail:
warnings.warn(
"`continue_on_fail` is deprecated. Use `continue_behaviour` instead",
DeprecationWarning,
)
function_kwargs = function_kwargs or {}
default_kwargs = inspect.getfullargspec(function)
if default_kwargs and default_kwargs.args and default_kwargs.defaults:
for key, val in zip(
default_kwargs.args[-len(default_kwargs.defaults) :],
default_kwargs.defaults,
):
function_kwargs.setdefault(key, val)
return self._add_function_step(
name=name,
function=function,
function_kwargs=function_kwargs,
function_return=function_return,
project_name=project_name,
task_name=task_name,
task_type=task_type,
auto_connect_frameworks=auto_connect_frameworks,
auto_connect_arg_parser=auto_connect_arg_parser,
packages=packages,
repo=repo,
repo_branch=repo_branch,
repo_commit=repo_commit,
helper_functions=helper_functions,
docker=docker,
docker_args=docker_args,
docker_bash_setup_script=docker_bash_setup_script,
parents=parents,
execution_queue=execution_queue,
monitor_metrics=monitor_metrics,
monitor_artifacts=monitor_artifacts,
monitor_models=monitor_models,
time_limit=time_limit,
continue_on_fail=continue_on_fail,
pre_execute_callback=pre_execute_callback,
post_execute_callback=post_execute_callback,
cache_executed_step=cache_executed_step,
retry_on_failure=retry_on_failure,
status_change_callback=status_change_callback,
tags=tags,
output_uri=output_uri,
draft=draft,
working_dir=working_dir,
continue_behaviour=continue_behaviour,
stage=stage
)
def start(
    self,
    queue: str = "services",
    step_task_created_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node", dict], bool]
    ] = None,  # noqa
    step_task_completed_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node"], None]
    ] = None,  # noqa
    wait: bool = True,
) -> bool:
    """
    Start the current pipeline remotely (on the selected services queue).
    The current process will be stopped and launched remotely.

    :param queue: queue name to launch the pipeline on
    :param Callable step_task_created_callback: Callback function, called when a step (Task) is created
        and before it is sent for execution. Allows a user to modify the Task before launch.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        `parameters` are the configuration arguments passed to the ClearmlJob.

        If the callback returned value is `False`,
        the Node is skipped and so is any node in the DAG that relies on this node.

        Notice the `parameters` are already parsed,
        e.g. ``${step1.parameters.Args/param}`` is replaced with relevant value.

        .. code-block:: py

            def step_created_callback(
                pipeline,    # type: PipelineController,
                node,        # type: PipelineController.Node,
                parameters,  # type: dict
            ):
                pass

    :param Callable step_task_completed_callback: Callback function, called when a step (Task) is completed
        and other jobs are executed. Allows a user to modify the Task status after completion.

        .. code-block:: py

            def step_completed_callback(
                pipeline,  # type: PipelineController,
                node,      # type: PipelineController.Node,
            ):
                pass

    :param wait: If True (default), start the pipeline controller, return only
        after the pipeline is done (completed/aborted/failed)

    :return: True, if the controller started. False, if the controller did not start.
    """
    if not self._task:
        raise ValueError(
            "Could not find main Task, PipelineController must be created with `always_create_task=True`"
        )
    # serialize state only if we are running locally
    if Task.running_locally() or not self._task.is_main_task():
        self._verify()
        self._serialize_pipeline_task()
        self.update_execution_plot()
        # stop current Task and execute remotely or no-op
        # NOTE: with exit_process=True the local process ends here when running locally;
        # when already running remotely this call is a no-op and execution continues below.
        self._task.execute_remotely(queue_name=queue, exit_process=True, clone=False)
    # this branch is only reached on the remote (agent-side) execution of the controller
    if not Task.running_locally() and self._task.is_main_task():
        self._start(
            step_task_created_callback=step_task_created_callback,
            step_task_completed_callback=step_task_completed_callback,
            wait=wait,
        )
    return True
def start_locally(self, run_pipeline_steps_locally: bool = False) -> None:
    """
    Start the current pipeline locally, i.e. run the pipeline logic on the current
    machine instead of on the `services` queue.

    With ``run_pipeline_steps_locally=True`` every pipeline step is also executed
    locally, as a sub-process.
    Notice: running the steps locally assumes local code execution (the local code is
    executed as-is, regardless of the git commit/diff of the pipeline-step Tasks).

    :param run_pipeline_steps_locally: (default False) If True, run each pipeline step
        locally as a subprocess (useful for debugging the pipeline locally; the pipeline
        code is expected to be available on the local machine)
    """
    task = self._task
    if not task:
        raise ValueError(
            "Could not find main Task, PipelineController must be created with `always_create_task=True`"
        )
    if run_pipeline_steps_locally:
        # run each step as a local subprocess instead of enqueuing it on an agent
        self._clearml_job_class = LocalClearmlJob
        self._default_execution_queue = self._default_execution_queue or "mock"
    # serialize state only when the controller logic itself runs locally
    if Task.running_locally() or not task.is_main_task():
        self._verify()
        self._serialize_pipeline_task()
    self._start(wait=True)
def create_draft(self) -> None:
    """
    Optional, manually create & serialize the Pipeline Task as a draft
    (use with care, intended for manual multi-pipeline creation).

    **Notice** the recommended flow is calling ``pipeline.start(queue=None)``, which has
    a similar effect and allows you to clone/enqueue later on.

    After calling this method, the pipeline can be edited in the UI and enqueued for
    execution. Use it to programmatically create pipelines for later usage; to
    automatically create and launch pipelines, call the ``start()`` method instead.
    """
    # verify and serialize the pipeline DAG onto the controller Task,
    # then close and reset it so it is left in a Draft (enqueueable) state
    self._verify()
    self._serialize_pipeline_task()
    self._task.close()
    self._task.reset()
def connect_configuration(
    self,
    configuration: Union[Mapping, list, Path, str],
    name: Optional[str] = None,
    description: Optional[str] = None,
) -> Union[dict, Path, str]:
    """
    Connect a configuration dictionary/list or configuration file (pathlib.Path / str)
    to the PipelineController object. Call this method before reading the configuration
    file.

    Example, a local file:

    .. code-block:: py

        config_file = pipe.connect_configuration(config_file)
        my_params = json.load(open(config_file,'rt'))

    Example, a parameter dictionary/list:

    .. code-block:: py

        my_params = pipe.connect_configuration(my_params)

    :param configuration: The configuration to connect. One of:

        - A dictionary/list - stored on the **ClearML Server** (backend) in a HOCON
          (JSON-like, editable) format.
        - A ``pathlib2.Path`` / string - path to a configuration file whose content is
          stored. A local path must be a relative path. When a pipeline is executed
          remotely by a worker, the content brought from the **ClearML Server**
          (backend) overwrites the file content.

    :param str name: Configuration section name (default: 'General'), allowing multiple
        configuration dicts/files to be stored side by side
    :param str description: Configuration section description text (default: None)

    :return: The connected configuration object: a dict when a dictionary was given, or
        a path to a local configuration file when a pathlib2.Path / string was given.
    """
    # the controller Task owns configuration storage/retrieval; simply delegate
    return self._task.connect_configuration(configuration, name=name, description=description)
@classmethod
def get_logger(cls) -> Logger:
    """
    Return a logger connected to the Pipeline Task.

    The logger can be used by any function/task executed by the pipeline in order to
    report directly to the pipeline Task itself, and may also be called from the main
    pipeline control Task.

    Raise ValueError if the main Pipeline task could not be located.

    :return: Logger object for reporting metrics (scalars, plots, debug samples etc.)
    """
    pipeline_task = cls._get_pipeline_task()
    return pipeline_task.get_logger()
@classmethod
def upload_model(
    cls,
    model_name: str,
    model_local_path: str,
    upload_uri: Optional[str] = None,
) -> OutputModel:
    """
    Upload (add) a model to the main Pipeline Task object.

    Can be called from any pipeline component to add models directly onto the main
    pipeline Task; the model file/path is uploaded to the Pipeline Task and registered
    on the model repository.

    Raise ValueError if the main Pipeline task could not be located.

    :param model_name: Model name as it will appear in the model registry (in the
        pipeline's project)
    :param model_local_path: Path to the local model file or directory to upload.
        A directory is packaged (recursively) into a zip file before uploading.
    :param upload_uri: The URI of the storage destination for the model weights upload.
        Defaults to the previously used URI.

    :return: The uploaded OutputModel
    """
    pipeline_task = cls._get_pipeline_task()
    # register the model on the pipeline Task, then upload its weights
    output_model = OutputModel(task=pipeline_task, name=str(model_name))
    output_model.update_weights(
        weights_filename=Path(model_local_path).as_posix(),
        upload_uri=upload_uri,
    )
    return output_model
@classmethod
def upload_artifact(
    cls,
    name: str,
    artifact_object: Any,
    metadata: Optional[Mapping] = None,
    delete_after_upload: bool = False,
    auto_pickle: Optional[bool] = None,
    preview: Any = None,
    wait_on_upload: bool = False,
    serialization_function: Optional[Callable[[Any], Union[bytes, bytearray]]] = None,
    sort_keys: bool = True,
) -> bool:
    """
    Upload (add) an artifact to the main Pipeline Task object.

    Can be called from any pipeline component in order to add artifacts directly onto
    the main pipeline Task, and also from the main pipeline control Task itself.

    Raise ValueError if the main Pipeline task could not be located.

    Currently supported artifact types:

    - string / Path - A path to an artifact file. If a wildcard or a folder is
      specified, ClearML creates and uploads a ZIP file.
    - dict - Stored as a ``.json`` file and uploaded.
    - pandas.DataFrame - Stored as a ``.csv.gz`` (compressed CSV) file and uploaded.
    - numpy.ndarray - Stored as an ``.npz`` file and uploaded.
    - PIL.Image - Stored as a ``.png`` file and uploaded.
    - Any - With ``auto_pickle=True``, the object is pickled and uploaded.

    :param name: The artifact name.

        .. warning::
            If an artifact with the same name was previously uploaded, then it is overwritten.

    :param artifact_object: The artifact object.
    :param metadata: A dictionary of key-value metadata pairs, shown with the experiment
        in the **ClearML Web-App (UI)**, **ARTIFACTS** tab.
    :param delete_after_upload: After the upload, delete the local copy of the artifact

        - ``True`` - Delete the local copy of the artifact.
        - ``False`` - Do not delete. (default)

    :param auto_pickle: If True, and the artifact_object is not one of the following types:
        pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string),
        local_file (string), the artifact_object is pickled and uploaded as a pickle file
        artifact (with file extension .pkl). If None (default), the
        sdk.development.artifacts.auto_pickle configuration value is used.
    :param preview: The artifact preview
    :param wait_on_upload: Whether the upload should be synchronous, forcing the upload
        to complete before continuing.
    :param serialization_function: A serialization function taking one parameter of any
        type (the object to be serialized) and returning a `bytes` or `bytearray` object
        representing the serialized object. Note the object is immediately serialized
        using this function, so other serialization methods are not used
        (e.g. `pandas.DataFrame.to_csv`), even if possible. To deserialize this artifact
        when getting it via `Artifact.get`, use its `deserialization_function` argument.
    :param sort_keys: If True (default), sort the keys of the artifact if it is
        yaml/json serializable; otherwise don't sort the keys. Ignored if the artifact
        is not yaml/json serializable.

    :return: The status of the upload.

        - ``True`` - Upload succeeded.
        - ``False`` - Upload failed.

    :raise: If the artifact object type is not supported, raise a ``ValueError``.
    """
    pipeline_task = cls._get_pipeline_task()
    # forward everything verbatim to Task.upload_artifact on the pipeline Task
    upload_kwargs = dict(
        artifact_object=artifact_object,
        metadata=metadata,
        delete_after_upload=delete_after_upload,
        auto_pickle=auto_pickle,
        preview=preview,
        wait_on_upload=wait_on_upload,
        serialization_function=serialization_function,
        sort_keys=sort_keys,
    )
    return pipeline_task.upload_artifact(name=name, **upload_kwargs)
def stop(
    self,
    timeout: Optional[float] = None,
    mark_failed: bool = False,
    mark_aborted: bool = False,
) -> None:
    """
    Stop the pipeline controller and the optimization thread.

    If mark_failed and mark_aborted are False (default) mark the pipeline as completed,
    unless one of the steps failed, then mark the pipeline as failed.

    :param timeout: Wait timeout for the optimization thread to exit (minutes).
        The default is ``None``, indicating do not wait to terminate immediately.
    :param mark_failed: If True, mark the pipeline task as failed. (default False)
    :param mark_aborted: If True, mark the pipeline task as aborted. (default False)
    """
    # signal the controller thread to stop, then wait for it to drain
    self._stop_event.set()
    self.wait(timeout=timeout)
    # nothing more to do when no controller Task was created
    if not self._task:
        return
    # sync pipeline state
    self.update_execution_plot()
    self._task.close()
    # pick the final status: explicit failed/aborted flags take precedence
    # over the controller's own failed-steps flag
    if mark_failed:
        self._task.mark_failed(status_reason="Pipeline aborted and failed", force=True)
    elif mark_aborted:
        self._task.mark_stopped(status_message="Pipeline aborted", force=True)
    elif self._pipeline_task_status_failed:
        print("Setting pipeline controller Task as failed (due to failed steps) !")
        self._task.mark_failed(status_reason="Pipeline step failed", force=True)
def wait(self, timeout: Optional[float] = None) -> bool:
    """
    Wait for the pipeline to finish.

    .. note::
        This method does not stop the pipeline. Call :meth:`stop` to terminate it.

    :param float timeout: Time to wait for pipeline completion (minutes).
        If ``None``, wait indefinitely until the pipeline completes.

    :return: True if the pipeline finished; False if the wait timed out.
    """
    if not self.is_running():
        return True
    # Thread.join expects seconds; the public API takes minutes
    join_timeout = None if timeout is None else timeout * 60.0
    worker = self._thread
    worker.join(timeout=join_timeout)
    return not worker.is_alive()
def is_running(self) -> bool:
    """
    Return True if the pipeline controller is running.

    :return: A boolean indicating whether the pipeline controller is active
        (still running) or stopped.
    """
    worker = self._thread
    if worker is None:
        return False
    return worker.is_alive()
def is_successful(self, fail_on_step_fail: bool = True, fail_condition: str = "all") -> bool:
    """
    Evaluate whether the pipeline is successful.

    :param fail_on_step_fail: If True (default), also evaluate the status of the
        pipeline steps to decide whether the pipeline is successful. If False, only
        evaluate the controller itself.
    :param fail_condition: One of 'all' (default), 'failed' or 'aborted'.
        'failed': return False if the pipeline failed, True if it was aborted.
        'aborted': return False if the pipeline was aborted, True if it failed.
        'all': return False in both cases.

    :return: A boolean indicating whether the pipeline was successful. Note that a
        pipeline in a running/pending state yields False.
    """
    # statuses counted as "success" for each fail_condition mode
    condition_to_statuses = {
        "all": [Task.TaskStatusEnum.completed],
        "failed": [Task.TaskStatusEnum.completed, Task.TaskStatusEnum.stopped],
        "aborted": [Task.TaskStatusEnum.completed, Task.TaskStatusEnum.failed],
    }
    if fail_condition not in condition_to_statuses:
        raise UsageError("fail_condition needs to be one of the following: 'all', 'failed', 'aborted'")
    success_status = condition_to_statuses[fail_condition]
    if self._task.status not in success_status:
        return False
    if not fail_on_step_fail:
        return True
    # refresh node statuses before inspecting each step
    self._update_nodes_status()
    return all(node.status in success_status for node in self._nodes.values())
def elapsed(self) -> float:
    """
    Return the minutes elapsed since the controller's start timestamp.

    :return: Minutes since the controller started. A negative value means the
        process has not started yet.
    """
    started = self._start_time
    if started is None:
        return -1.0
    return (time() - started) / 60.0
def get_pipeline_dag(self) -> Mapping[str, "PipelineController.Node"]:
    """
    Return the pipeline execution graph. Each node in the DAG is a
    PipelineController.Node object; the graph itself is a dictionary keyed by the
    unique node name, where each node holds links to its parent nodes (identified
    by their unique names).

    :return: execution tree, as a nested dictionary. Example:

    .. code-block:: py

        {
            'stage1' : Node() {
                name: 'stage1'
                job: ClearmlJob
                ...
            },
        }
    """
    return self._nodes
def get_processed_nodes(self) -> Mapping[str, "PipelineController.Node"]:
    """
    Return the processed pipeline nodes, mapped by their unique node name.

    Note: despite the historical wording ("a list"), the returned value is a dict of
    node name -> PipelineController.Node; the annotation now reflects this.

    :return: Mapping of executed (excluding currently executing) nodes, keyed by node name
    """
    # a node counts as processed once its `executed` field is set (truthy)
    return {node_name: node for node_name, node in self._nodes.items() if node.executed}
def get_running_nodes(self) -> Mapping[str, "PipelineController.Node"]:
    """
    Return the currently running pipeline nodes, mapped by their unique node name.

    Note: despite the historical wording ("a list"), the returned value is a dict of
    node name -> PipelineController.Node; the annotation now reflects this.

    :return: Mapping of currently running nodes, keyed by node name
    """
    # membership in self._running_nodes marks a node as currently executing
    return {node_name: node for node_name, node in self._nodes.items() if node_name in self._running_nodes}
def update_execution_plot(self) -> None:
    """
    Update the sankey diagram of the current pipeline on the controller Task.
    """
    # plot updates are serialized under the reporting lock
    with self._reporting_lock:
        self._update_execution_plot()
    # also trigger node monitor scanning (intentionally outside the lock)
    self._scan_monitored_nodes()
def add_parameter(
    self,
    name: str,
    default: Optional[Any] = None,
    description: Optional[str] = None,
    param_type: Optional[str] = None,
) -> None:
    """
    Add a parameter to the pipeline Task, usable as an input parameter for any step
    in the pipeline.

    All parameters appear under the PipelineController Task's
    Hyper-parameters -> Pipeline section.

    Example: ``pipeline.add_parameter(name='dataset', description='dataset ID to process the pipeline')``
    A step can then refer to the parameter value with ``'${pipeline.dataset}'``.

    :param name: String name of the parameter.
    :param default: Default value (can be changed later in the UI)
    :param description: String description of the parameter and its usage in the pipeline
    :param param_type: Optional parameter type information (used as a hint for casting
        and description)
    """
    key = str(name)
    self._pipeline_args[key] = default
    # description and type metadata are stored only when provided (truthy)
    if description:
        self._pipeline_args_desc[key] = str(description)
    if param_type:
        self._pipeline_args_type[key] = param_type
def get_parameters(self) -> dict:
    """
    Return the pipeline parameters dictionary.

    :return: Dictionary str -> str
    """
    return self._pipeline_args
@classmethod
def _create_pipeline_project_args(cls, name: str, project: str) -> dict:
    """
    Build the task-name / project-name arguments for a new pipeline Task.

    :param name: Requested pipeline task name (may be empty)
    :param project: Requested project name (may be empty)
    :return: dict with 'task_name', 'parent_project' and 'project_name'
    """
    # fall back to the project name, then to a timestamp, when no name is given
    task_name = name or project or "{}".format(datetime.now())
    if not cls._pipeline_as_sub_project():
        # flat layout: pipelines live directly under the given (or default) project
        return {
            "task_name": task_name,
            "parent_project": None,
            "project_name": project or "Pipelines",
        }
    # sub-project layout: "<project>/<pipelines-section>/<task-name>"
    parent_project = (project + "/" if project else "") + cls._project_section
    return {
        "task_name": task_name,
        "parent_project": parent_project,
        "project_name": "{}/{}".format(parent_project, task_name),
    }
@classmethod
def _create_pipeline_projects(cls, task: Task, parent_project: str, project_name: str) -> None:
    # Ensure the (hidden) parent project and the pipeline project itself exist,
    # creating them on the backend when missing.
    # NOTE(review): despite the ``-> None`` annotation, the second branch returns the
    # result of get_or_create_project (presumably a project ID) -- confirm and fix
    # the annotation against get_or_create_project's actual return type.
    # make sure project is hidden
    if not cls._pipeline_as_sub_project():
        # flat layout: no sub-projects to create
        return
    # create/fetch the hidden parent project (result intentionally discarded)
    get_or_create_project(
        Task._get_default_session(),
        project_name=parent_project,
        system_tags=["hidden"],
    )
    # create/fetch the pipeline project itself and return its backend result
    return get_or_create_project(
        Task._get_default_session(),
        project_name=project_name,
        project_id=task.project,
        system_tags=cls._project_system_tags,
    )
@classmethod
def create(
    cls,
    project_name: str,
    task_name: str,
    repo: Optional[str] = None,
    branch: Optional[str] = None,
    commit: Optional[str] = None,
    script: Optional[str] = None,
    working_directory: Optional[str] = None,
    packages: Optional[Union[bool, Sequence[str]]] = None,
    requirements_file: Optional[Union[str, Path]] = None,
    docker: Optional[str] = None,
    docker_args: Optional[str] = None,
    docker_bash_setup_script: Optional[str] = None,
    argparse_args: Optional[Sequence[Tuple[str, str]]] = None,
    force_single_script_file: bool = False,
    version: Optional[str] = None,
    add_run_number: bool = True,
    binary: Optional[str] = None,
    module: Optional[str] = None,
    detect_repository: bool = True
) -> "PipelineController":
    """
    Manually create and populate a new Pipeline in the system.
    Supports pipelines from functions, decorators and tasks.

    :param project_name: Set the project name for the pipeline.
    :param task_name: Set the name of the remote pipeline.
    :param repo: Remote URL for the repository to use, or path to local copy of the git repository.
        Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'. If ``repo`` is specified, then
        the ``script`` parameter must also be specified
    :param branch: Select specific repository branch/tag (implies the latest commit from the branch)
    :param commit: Select specific commit ID to use (default: latest commit,
        or when used with local repository matching the local commit ID)
    :param script: Specify the entry point script for the remote execution. When used in tandem with
        remote git repository the script should be a relative path inside the repository,
        for example: './source/train.py' . When used with local repository path it supports a
        direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
    :param working_directory: Working directory to launch the script from. Default: repository root folder.
        Relative to repo root or local folder.
    :param packages: Manually specify a list of required packages. Example: ``["tqdm>=2.1", "scikit-learn"]``
        or `True` to automatically create requirements
        based on locally installed packages (repository must be local).
        Pass an empty string to not install any packages (not even from the repository)
    :param requirements_file: Specify requirements.txt file to install when setting the session.
        If not provided, the requirements.txt from the repository will be used.
    :param docker: Select the docker image to be executed in by the remote session
    :param docker_args: Add docker arguments, pass a single string
    :param docker_bash_setup_script: Add bash script to be executed
        inside the docker before setting up the Task's environment
    :param argparse_args: Arguments to pass to the remote execution, list of string pairs (argument, value)
        Notice, only supported if the codebase itself uses argparse.ArgumentParser
    :param force_single_script_file: If True, do not auto-detect local repository
    :param version: Optional, set the pipeline version (stored as a user property);
        defaults to the class default pipeline version when not provided
    :param add_run_number: If True (default), append a run number to the pipeline name
    :param binary: Binary used to launch the pipeline
    :param module: If specified instead of executing `script`, a module named `module` is executed.
        Implies script is empty. Module can contain multiple argument for execution,
        for example: module="my.module arg1 arg2"
    :param detect_repository: If True, detect the repository if no repository has been specified.
        If False, don't detect repository under any circumstance. Ignored if `repo` is specified
    :return: The newly created PipelineController
    """
    # resolve the pipeline task-name / project-name layout (flat or sub-project)
    pipeline_project_args = cls._create_pipeline_project_args(name=task_name, project=project_name)
    # create the controller Task carrying the pipeline code/environment definition
    pipeline_controller = Task.create(
        project_name=pipeline_project_args["project_name"],
        task_name=pipeline_project_args["task_name"],
        task_type=Task.TaskTypes.controller,
        repo=repo,
        branch=branch,
        commit=commit,
        script=script,
        working_directory=working_directory,
        packages=packages,
        requirements_file=requirements_file,
        docker=docker,
        docker_args=docker_args,
        docker_bash_setup_script=docker_bash_setup_script,
        argparse_args=argparse_args,
        add_task_init_call=False,
        force_single_script_file=force_single_script_file,
        binary=binary,
        module=module,
        detect_repository=detect_repository
    )
    # make sure the backing (hidden) projects exist for the sub-project layout
    cls._create_pipeline_projects(
        task=pipeline_controller,
        parent_project=pipeline_project_args["parent_project"],
        project_name=pipeline_project_args["project_name"],
    )
    # tag the Task as a pipeline and record its version
    pipeline_controller.set_system_tags((pipeline_controller.get_system_tags() or []) + [cls._tag])
    pipeline_controller.set_user_properties(version=version or cls._default_pipeline_version)
    if add_run_number:
        cls._add_pipeline_name_run_number(pipeline_controller)
    return cls._create_pipeline_controller_from_task(pipeline_controller)
@classmethod
def clone(
    cls,
    pipeline_controller: Union["PipelineController", str],
    name: Optional[str] = None,
    comment: Optional[str] = None,
    parent: Optional[str] = None,
    project: Optional[str] = None,
    version: Optional[str] = None,
) -> "PipelineController":
    """
    Duplicate an existing pipeline (experiment). The resulting clone is in ``Draft``
    status and can be modified before execution.

    :param pipeline_controller: Pipeline to duplicate - a PipelineController object or a Task ID string.
    :param name: Name for the cloned pipeline.
    :param comment: Description / comment attached to the cloned pipeline.
    :param parent: Parent Task ID for the clone.
        - When omitted, the clone's parent falls back to ``source_task.parent``.
        - When that is also unavailable, it falls back to ``source_task`` itself.
    :param project: Project name in which to create the clone.
        When omitted, the clone stays in the source pipeline's project.
    :param version: Version string for the clone. When omitted, the source pipeline's
        version is inherited.
    :return: The newly cloned PipelineController
    """
    # resolve the argument into a backend Task object
    if isinstance(pipeline_controller, PipelineController):
        source_task = pipeline_controller.task
    elif isinstance(pipeline_controller, six.string_types):
        source_task = Task.get_task(task_id=pipeline_controller)
    else:
        source_task = pipeline_controller
    if project or name:
        # a new destination was requested - compute the hidden pipeline project/name layout
        proj_args = cls._create_pipeline_project_args(
            name=name or source_task.name,
            project=project or source_task.get_project_name(),
        )
        project = cls._create_pipeline_projects(
            task=source_task,
            parent_project=proj_args["parent_project"],
            project_name=proj_args["project_name"],
        )
        name = proj_args["task_name"]
    cloned_task = Task.clone(
        source_task=source_task,
        name=name,
        comment=comment,
        parent=parent,
        project=project,
    )
    if version:
        cloned_task.set_user_properties(version=version)
    return cls._create_pipeline_controller_from_task(cloned_task)
@classmethod
def enqueue(
    cls,
    pipeline_controller: Union["PipelineController", str],
    queue_name: Optional[str] = None,
    queue_id: Optional[str] = None,
    force: bool = False,
) -> Any:
    """
    Add a PipelineController to an execution queue.

    .. note::
        A worker daemon must be listening on the queue for the controller to actually
        be fetched and executed, see "ClearML Agent" in the ClearML Documentation.

    :param pipeline_controller: The PipelineController to enqueue - an object or a PipelineController ID string
    :param queue_name: Name of the target queue. Either this or ``queue_id`` must be specified
    :param queue_id: ID of the target queue. Either this or ``queue_name`` must be specified
    :param bool force: If True, reset the PipelineController if necessary before enqueuing it
    :return: The enqueue JSON response returned by the backend.

        .. code-block:: javascript

           {
                "queued": 1,
                "updated": 1,
                "fields": {
                    "status": "queued",
                    "status_reason": "",
                    "status_message": "",
                    "status_changed": "2020-02-24T15:05:35.426770+00:00",
                    "last_update": "2020-02-24T15:05:35.426770+00:00",
                    "execution.queue": "2bd96ab2d9e54b578cc2fb195e52c7cf"
                    }
            }

        - ``queued`` - The number of Tasks enqueued (an integer or ``null``).
        - ``updated`` - The number of Tasks updated (an integer or ``null``).
        - ``fields``

          - ``status`` - The status of the experiment.
          - ``status_reason`` - The reason for the last status change.
          - ``status_message`` - Information about the status.
          - ``status_changed`` - The last status change date and time (ISO 8601 format).
          - ``last_update`` - The last Task update time, including Task creation, update, change, or events for
            this task (ISO 8601 format).
          - ``execution.queue`` - The ID of the queue where the Task is enqueued. ``null`` indicates not enqueued.
    """
    if not isinstance(pipeline_controller, PipelineController):
        # an ID string was passed - resolve it into a PipelineController first
        pipeline_controller = cls.get(pipeline_id=pipeline_controller)
    # noinspection PyProtectedMember
    return Task.enqueue(
        pipeline_controller._task,
        queue_name=queue_name,
        queue_id=queue_id,
        force=force,
    )
@classmethod
def get(
    cls,
    pipeline_id: Optional[str] = None,
    pipeline_project: Optional[str] = None,
    pipeline_name: Optional[str] = None,
    pipeline_version: Optional[str] = None,
    pipeline_tags: Optional[Sequence[str]] = None,
    shallow_search: bool = False,
) -> "PipelineController":
    """
    Get a specific PipelineController. If multiple pipeline controllers are found, the pipeline controller
    with the highest semantic version is returned. If no semantic version is found, the most recently
    updated pipeline controller is returned. This function raises an Exception if no pipeline controller
    was found

    Note: In order to run the pipeline controller returned by this function, use PipelineController.enqueue

    :param pipeline_id: Requested PipelineController ID
    :param pipeline_project: Requested PipelineController project
    :param pipeline_name: Requested PipelineController name
    :param pipeline_version: Requested PipelineController semantic version. If specified,
        only a pipeline whose stored runtime version exactly matches is returned
    :param pipeline_tags: Requested PipelineController tags (list of tag strings)
    :param shallow_search: If True, search only the first 500 results (first page)
    :raises ValueError: If no matching pipeline controller was found
    """
    # pipeline_id is mutually exclusive with project/name based lookup
    mutually_exclusive(
        pipeline_id=pipeline_id,
        pipeline_project=pipeline_project,
        _require_at_least_one=False,
    )
    mutually_exclusive(
        pipeline_id=pipeline_id,
        pipeline_name=pipeline_name,
        _require_at_least_one=False,
    )
    if not pipeline_id:
        # pipelines live in a hidden sub-project: "<project>/<pipelines-section>/<name>"
        pipeline_project_hidden = "{}/{}/{}".format(pipeline_project, cls._project_section, pipeline_name)
        # match the exact name, allowing an optional " #<run-number>" suffix added per run
        name_with_runtime_number_regex = r"^{}( #[0-9]+)*$".format(re.escape(pipeline_name))
        pipelines = Task._query_tasks(
            pipeline_project=[pipeline_project_hidden],
            task_name=name_with_runtime_number_regex,
            fetch_only_first_page=False if not pipeline_version else shallow_search,
            only_fields=["id"] if not pipeline_version else ["id", "runtime.version"],
            system_tags=[cls._tag],
            order_by=["-last_update"],
            tags=pipeline_tags,
            search_hidden=True,
            _allow_extra_fields_=True,
        )
        if pipelines:
            if not pipeline_version:
                # default to the most recently updated pipeline,
                # then prefer the highest valid semantic version if any exists
                pipeline_id = pipelines[0].id
                current_version = None
                for pipeline in pipelines:
                    if not pipeline.runtime:
                        continue
                    candidate_version = pipeline.runtime.get("version")
                    if not candidate_version or not Version.is_valid_version_string(candidate_version):
                        continue
                    if not current_version or Version(candidate_version) > current_version:
                        current_version = Version(candidate_version)
                        pipeline_id = pipeline.id
            else:
                # exact version requested - take the first (most recently updated) match
                for pipeline in pipelines:
                    if pipeline.runtime.get("version") == pipeline_version:
                        pipeline_id = pipeline.id
                        break
        if not pipeline_id:
            # NOTE: message previously said "dataset" (copy-paste from Dataset.get) - fixed
            error_msg = "Could not find pipeline with pipeline_project={}, pipeline_name={}".format(
                pipeline_project, pipeline_name
            )
            if pipeline_version:
                error_msg += ", pipeline_version={}".format(pipeline_version)
            raise ValueError(error_msg)
    pipeline_task = Task.get_task(task_id=pipeline_id)
    return cls._create_pipeline_controller_from_task(pipeline_task)
@classmethod
def _create_pipeline_controller_from_task(cls, pipeline_task: Task) -> "PipelineController":
    """
    Wrap an existing pipeline controller Task in a PipelineController instance.

    Bypasses ``__init__`` (no new Task is created on the backend); the DAG is
    restored, best-effort, from the configuration stored on the Task.

    :param pipeline_task: An existing pipeline controller Task
    :return: A PipelineController bound to ``pipeline_task``
    """
    # __new__ skips __init__ on purpose - we are attaching to an existing Task
    pipeline_object = cls.__new__(cls)
    pipeline_object._task = pipeline_task
    pipeline_object._nodes = {}
    pipeline_object._running_nodes = []
    # noinspection PyProtectedMember
    pipeline_object._version = pipeline_task._get_runtime_properties().get("version")
    try:
        # restore the DAG definition stored on the Task (force=True overrides
        # the "always create from code" setting)
        pipeline_object._deserialize(pipeline_task._get_configuration_dict(cls._config_section), force=True)
    except Exception:
        # best-effort: a missing/unparsable configuration leaves an empty DAG
        pass
    return pipeline_object
@property
def task(self) -> Task:
    """Return the underlying pipeline controller Task object."""
    return self._task
@property
def id(self) -> str:
    """Return the Task ID of the pipeline controller."""
    return self._task.id
@property
def tags(self) -> List[str]:
    """Return the user tags of the pipeline controller Task (empty list if none)."""
    return self._task.get_tags() or []
@property
def version(self) -> str:
    """Return the pipeline version string."""
    return self._version
def add_tags(self, tags: Union[Sequence[str], str]) -> None:
"""
Add tags to this pipeline. Old tags are not deleted.
When executing a Pipeline remotely
(i.e. launching the pipeline from the UI/enqueuing it), this method has no effect.
:param tags: A list of tags for this pipeline.
"""
if not self._task:
return # should not actually happen
self._task.add_tags(tags)
def _create_task_from_function(
    self,
    docker: Optional[str],
    docker_args: Optional[str],
    docker_bash_setup_script: Optional[str],
    function: Callable,
    function_input_artifacts: Dict[str, str],
    function_kwargs: Dict[str, Any],
    function_return: List[str],
    auto_connect_frameworks: Optional[dict],
    auto_connect_arg_parser: Optional[dict],
    packages: Optional[Union[bool, str, Sequence[str]]],
    project_name: Optional[str],
    task_name: Optional[str],
    task_type: Optional[str],
    repo: Optional[str],
    branch: Optional[str],
    commit: Optional[str],
    helper_functions: Optional[Sequence[Callable]],
    output_uri: Optional[Union[str, bool]] = None,
    working_dir: Optional[str] = None,
) -> dict:
    """
    Build a standalone Task definition (as a dict) from a python function.

    Thin pass-through to ``CreateFromFunction.create_task_from_function`` in dry-run
    mode: nothing is registered on the backend, the returned dict describes the Task
    to be created later. Parameters mirror that helper; pipeline-level settings
    (task template header, artifact (de)serialization functions, global-import
    skipping) are taken from the controller instance.

    :return: The Task definition dictionary (dry-run, not yet created on the backend)
    """
    # dry_run=True: return the task definition instead of creating a backend Task
    task_definition = CreateFromFunction.create_task_from_function(
        a_function=function,
        function_kwargs=function_kwargs or None,
        function_input_artifacts=function_input_artifacts,
        function_return=function_return,
        project_name=project_name,
        task_name=task_name,
        task_type=task_type,
        auto_connect_frameworks=auto_connect_frameworks,
        auto_connect_arg_parser=auto_connect_arg_parser,
        repo=repo,
        branch=branch,
        commit=commit,
        packages=packages,
        docker=docker,
        docker_args=docker_args,
        docker_bash_setup_script=docker_bash_setup_script,
        output_uri=output_uri,
        helper_functions=helper_functions,
        dry_run=True,
        task_template_header=self._task_template_header,
        artifact_serialization_function=self._artifact_serialization_function,
        artifact_deserialization_function=self._artifact_deserialization_function,
        skip_global_imports=self._skip_global_imports,
        working_dir=working_dir,
    )
    return task_definition
def _start(
    self,
    step_task_created_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node", dict], bool]
    ] = None,  # noqa
    step_task_completed_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node"], None]
    ] = None,  # noqa
    wait: bool = True,
) -> bool:
    """
    Start the pipeline controller.
    If the calling process is stopped, then the controller stops as well.

    :param Callable step_task_created_callback: Callback function, called when a step (Task) is created
        and before it is sent for execution. Allows a user to modify the Task before launch.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        `parameters` are the configuration arguments passed to the ClearmlJob.

        If the callback returned value is `False`,
        the Node is skipped and so is any node in the DAG that relies on this node.

        Notice the `parameters` are already parsed,
        e.g. ``${step1.parameters.Args/param}`` is replaced with relevant value.

        .. code-block:: py

            def step_created_callback(
                pipeline,             # type: PipelineController,
                node,                 # type: PipelineController.Node,
                parameters,           # type: dict
            ):
                pass

    :param Callable step_task_completed_callback: Callback function, called when a step (Task) is completed
        and other jobs are executed. Allows a user to modify the Task status after completion.

        .. code-block:: py

            def step_completed_callback(
                pipeline,             # type: PipelineController,
                node,                 # type: PipelineController.Node,
            ):
                pass

    :param wait: If True (default), start the pipeline controller, return only
        after the pipeline is done (completed/aborted/failed)

    :return: True, if the controller started. False, if the controller did not start.
    """
    if self._thread:
        # already running - starting twice is a no-op
        return True
    self._prepare_pipeline(step_task_created_callback, step_task_completed_callback)
    # run the scheduling loop on a daemon thread so it dies with the calling process
    self._thread = Thread(target=self._daemon)
    self._thread.daemon = True
    self._thread.start()
    if wait:
        # block until the pipeline finishes, then shut the controller down
        self.wait()
        self.stop()
    return True
def _prepare_pipeline(
    self,
    step_task_created_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node", dict], bool]
    ] = None,  # noqa
    step_task_completed_callback: Optional[
        Callable[["PipelineController", "PipelineController.Node"], None]
    ] = None,  # noqa
) -> None:
    """
    Serialize the pipeline into the controller Task, restore any stored DAG state,
    verify the execution graph, and initialize the runtime members used by the
    daemon thread (start time, stop event, user callbacks).

    :param step_task_created_callback: Optional user callback invoked per step before launch
    :param step_task_completed_callback: Optional user callback invoked per step on completion
    :raises ValueError: If the DAG has inaccessible nodes or contains cycles
    """
    params, pipeline_dag = self._serialize_pipeline_task()
    # deserialize back pipeline state
    if not params["continue_pipeline"]:
        # fresh run: wipe any per-node execution state left in the stored DAG
        for k in pipeline_dag:
            pipeline_dag[k]["executed"] = None
            pipeline_dag[k]["job_started"] = None
            pipeline_dag[k]["job_ended"] = None
    self._default_execution_queue = params["default_queue"]
    self._add_pipeline_tags = params["add_pipeline_tags"]
    self._target_project = params["target_project"] or ""
    self._deserialize(pipeline_dag)
    # if we continue the pipeline, make sure that we re-execute failed tasks
    # (executed is False marks a failed step; None marks "not run yet")
    if params["continue_pipeline"]:
        for node in list(self._nodes.values()):
            if node.executed is False:
                node.executed = None
    if not self._verify():
        raise ValueError(
            "Failed verifying pipeline execution graph, it has either inaccessible nodes, or contains cycles"
        )
    self.update_execution_plot()
    self._start_time = time()
    self._stop_event = Event()
    self._experiment_created_cb = step_task_created_callback
    self._experiment_completed_cb = step_task_completed_callback
def _serialize_pipeline_task(self) -> Tuple[dict, dict]:
    """
    Serialize current pipeline state into the main Task.

    When running locally (or remotely without a stored configuration) the DAG,
    pipeline arguments, version and state hash are written onto the Task.
    When running remotely with a stored configuration, the stored values are
    connected back (read) instead, and ``continue_pipeline`` reflects whether a
    previous run's hash exists (i.e. this is a resumed run).

    :return: params, pipeline_dag
    """
    params = {
        "default_queue": self._default_execution_queue,
        "add_pipeline_tags": self._add_pipeline_tags,
        "target_project": self._target_project,
    }
    pipeline_dag = self._serialize()

    # serialize pipeline state
    if self._task and self._auto_connect_task:
        # check if we are either running locally or that we are running remotely,
        # but we have no configuration, so we need to act as if this is a local run and create everything
        if self._task.running_locally() or self._task.get_configuration_object(name=self._config_section) is None:
            # noinspection PyProtectedMember
            self._task._set_configuration(
                name=self._config_section,
                config_type="dictionary",
                config_text=json.dumps(pipeline_dag, indent=2),
            )
            # invert the section->args mapping so each arg can be written under its section
            args_map_inversed = {}
            for section, arg_list in self._args_map.items():
                for arg in arg_list:
                    args_map_inversed[arg] = section
            pipeline_args = flatten_dictionary(self._pipeline_args)
            # noinspection PyProtectedMember
            self._task._set_parameters(
                {
                    "{}/{}".format(args_map_inversed.get(k, self._args_section), k): v
                    for k, v in pipeline_args.items()
                },
                __parameters_descriptions=self._pipeline_args_desc,
                __parameters_types=self._pipeline_args_type,
                __update=True,
            )
            self._task.connect(params, name=self._pipeline_section)
            params["continue_pipeline"] = False

            # make sure we have a unique version number (auto bump version if needed)
            # only needed when manually (from code) creating pipelines
            self._handle_pipeline_version()

            # noinspection PyProtectedMember
            pipeline_hash = self._get_task_hash()

            # noinspection PyProtectedMember
            self._task._set_runtime_properties(
                {
                    self._runtime_property_hash: "{}:{}".format(pipeline_hash, self._version),
                    "version": self._version,
                }
            )
            self._task.set_user_properties(version=self._version)
        else:
            # running remotely with a stored configuration: read back the stored state
            self._task.connect_configuration(pipeline_dag, name=self._config_section)
            connected_args = set()
            new_pipeline_args = {}
            for section, arg_list in self._args_map.items():
                mutable_dict = {arg: self._pipeline_args.get(arg) for arg in arg_list}
                self._task.connect(mutable_dict, name=section)
                new_pipeline_args.update(mutable_dict)
                connected_args.update(arg_list)
            # remaining args go under the default args section
            mutable_dict = {k: v for k, v in self._pipeline_args.items() if k not in connected_args}
            self._task.connect(mutable_dict, name=self._args_section)
            new_pipeline_args.update(mutable_dict)
            self._pipeline_args = new_pipeline_args
            self._task.connect(params, name=self._pipeline_section)
            # noinspection PyProtectedMember
            if self._task._get_runtime_properties().get(self._runtime_property_hash):
                # a state hash was already stored: this is a continued (resumed) run
                params["continue_pipeline"] = True
            else:
                # noinspection PyProtectedMember
                pipeline_hash = ClearmlJob._create_task_hash(self._task)
                # noinspection PyProtectedMember
                self._task._set_runtime_properties(
                    {
                        self._runtime_property_hash: "{}:{}".format(pipeline_hash, self._version),
                    }
                )
                params["continue_pipeline"] = False

    return params, pipeline_dag
def _handle_pipeline_version(self) -> None:
if not self._version:
# noinspection PyProtectedMember
self._version = self._task._get_runtime_properties().get("version")
if not self._version:
previous_pipeline_tasks = Task._query_tasks(
project=[self._task.project],
fetch_only_first_page=True,
only_fields=["runtime.version"],
order_by=["-last_update"],
system_tags=[self._tag],
search_hidden=True,
_allow_extra_fields_=True,
)
for previous_pipeline_task in previous_pipeline_tasks:
if previous_pipeline_task.runtime.get("version"):
self._version = str(Version(previous_pipeline_task.runtime.get("version")).get_next_version())
break
self._version = self._version or self._default_pipeline_version
def _get_task_hash(self) -> str:
    """
    Compute a hash of the pipeline Task state: its parameters (excluding the
    version user-property), the DAG structure stripped of runtime/status fields,
    and all configuration objects.

    :return: The pipeline state hash string
    """
    params_override = dict(**(self._task.get_parameters() or {}))
    # the stored version must not affect the hash, otherwise bumping it changes the hash
    params_override.pop("properties/version", None)

    # dag state without status / states
    nodes_items = list(self._nodes.items())
    dag = {
        name: {
            k: v
            for k, v in node.__dict__.items()
            if k
            not in (
                "job",
                "name",
                "task_factory_func",
                "executed",
                "status",
                "job_started",
                "job_ended",
                "skip_job",
            )
        }
        for name, node in nodes_items
    }

    # get all configurations (as dict of strings for hashing)
    configurations_override = dict(**self._task.get_configuration_objects())
    # store as text so we can hash it later
    configurations_override[self._config_section] = json.dumps(dag)

    # noinspection PyProtectedMember
    pipeline_hash = ClearmlJob._create_task_hash(
        self._task,
        params_override=params_override,
        configurations_override=configurations_override,
    )
    return pipeline_hash
def _serialize(self) -> dict:
"""
Store the definition of the pipeline DAG into a dictionary.
This dictionary will be used to store the DAG as a configuration on the Task
:return:
"""
nodes_items = list(self._nodes.items())
dag = {
name: dict((k, v) for k, v in node.__dict__.items() if k not in ("job", "name", "task_factory_func"))
for name, node in nodes_items
}
# update state for presentation only
for name, node in nodes_items:
dag[name]["job_id"] = node.executed or (node.job.task_id() if node.job else None)
return dag
def _deserialize(self, dag_dict: dict, force: bool = False) -> None:
    """
    Restore the DAG from a dictionary.
    This will be used to create the DAG from the dict stored on the Task, when running remotely.

    :param dag_dict: Serialized DAG, as produced by ``_serialize``
    :param force: If True, deserialize even when the pipeline is configured to
        always be re-created from code
    """
    # if we always want to load the pipeline DAG from code, we are skipping the deserialization step
    if not force and self._always_create_from_code:
        return

    # if we do not clone the Task, only merge the parts we can override.
    for name in list(self._nodes.keys()):
        if not self._nodes[name].clone_task and name in dag_dict and not dag_dict[name].get("clone_task"):
            for k in (
                "queue",
                "parents",
                "timeout",
                "parameters",
                "configurations",
                "task_overrides",
                "executed",
                "job_started",
                "job_ended",
            ):
                # fall back to an empty value of the attribute's current type
                setattr(
                    self._nodes[name],
                    k,
                    dag_dict[name].get(k) or type(getattr(self._nodes[name], k))(),
                )

    # if we do clone the Task deserialize everything, except the function creating
    # (job_id is presentation-only and is dropped)
    self._nodes = {
        k: self.Node(name=k, **{kk: vv for kk, vv in v.items() if kk not in ("job_id",)})
        if k not in self._nodes or (v.get("base_task_id") and v.get("clone_task"))
        else self._nodes[k]
        for k, v in dag_dict.items()
    }

    # set the task_factory_func for each cloned node
    for node in list(self._nodes.values()):
        if not node.base_task_id and not node.task_factory_func and node.job_code_section:
            if node.job_code_section in self._nodes:
                func = self._nodes[node.job_code_section].task_factory_func
                if func:
                    node.task_factory_func = func
def _has_stored_configuration(self) -> bool:
"""
Return True if we are running remotely, and we have stored configuration on the Task
"""
if self._auto_connect_task and self._task and not self._task.running_locally() and self._task.is_main_task():
stored_config = self._task.get_configuration_object(self._config_section)
return bool(stored_config)
return False
def _verify(self) -> bool:
    """
    Verify the DAG: every node must be valid, and the graph must be fully
    reachable with no cycles or missing parents.
    On error raise ValueError with verification details.

    :return: True iff the DAG has no errors
    """
    # per-node validation raises ValueError on the first invalid node
    for node in list(self._nodes.values()):
        self._verify_node(node)
    # graph-level validation: reachability and acyclicity
    return self._verify_dag()
def _verify_node(self, node: "PipelineController.Node") -> bool:
    """
    Verify a single pipeline node: it must reference a task (or a factory
    function), have an execution queue available, have valid parents, and use
    properly sectioned parameter names. Step references found inside parameter
    values are added to the node's parents. Monitoring sections are normalized
    into pair form.

    Raise ValueError on verification errors.

    :return: Return True iff the specific node is verified
    """
    if not node.base_task_id and not node.task_factory_func:
        raise ValueError("Node '{}', base_task_id is empty".format(node.name))

    if not self._default_execution_queue and not node.queue:
        raise ValueError(
            "Node '{}' missing execution queue, "
            "no default queue defined and no specific node queue defined".format(node.name)
        )

    task = node.task_factory_func or Task.get_task(task_id=node.base_task_id)
    if not task:
        raise ValueError("Node '{}', base_task_id={} is invalid".format(node.name, node.base_task_id))

    pattern = self._step_ref_pattern

    # verify original node parents
    if node.parents and not all(isinstance(p, str) and p in self._nodes for p in node.parents):
        raise ValueError("Node '{}', parents={} is invalid".format(node.name, node.parents))

    parents = set()
    for k, v in node.parameters.items():
        if isinstance(v, str):
            # collect implicit parents from ${step.*} references inside the value
            for g in pattern.findall(v):
                ref_step = self.__verify_step_reference(node, g)
                if ref_step:
                    parents.add(ref_step)
        # verify we have a section name
        # BUGFIX: the message refers to the parameter name, so format the key (k), not the value (v)
        if "/" not in k:
            raise ValueError(
                'Section name is missing in parameter "{}", '
                "parameters should be in the form of "
                '"`section-name`/parameter", example: "Args/param"'.format(k)
            )

    if parents and parents != set(node.parents or []):
        parents = parents - set(node.parents or [])
        getLogger("clearml.automation.controller").info(
            'Node "{}" missing parent reference, adding: {}'.format(node.name, parents)
        )
        node.parents = (node.parents or []) + list(parents)

    # verify and fix monitoring sections:
    def _verify_monitors(
        monitors: Union[List[Union[str, Tuple[Any, Any]]], None],
        monitor_type: str,
        nested_pairs: bool = False,
    ) -> List[Tuple[Union[str, Tuple[str, str]], Union[str, Tuple[str, str]]]]:
        # normalize a monitor spec into a list of (source, target) pairs
        if not monitors:
            return monitors

        if nested_pairs:
            if not all(isinstance(x, (list, tuple)) and x for x in monitors):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))
            # convert single pair into a pair of pairs:
            conformed_monitors = [pair if isinstance(pair[0], (list, tuple)) else (pair, pair) for pair in monitors]
            # verify the pair of pairs
            if not all(
                isinstance(x[0][0], str)
                and isinstance(x[0][1], str)
                and isinstance(x[1][0], str)
                and isinstance(x[1][1], str)
                for x in conformed_monitors
            ):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))
        else:
            # verify a list of tuples
            if not all(isinstance(x, (list, tuple, str)) and x for x in monitors):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))
            # convert single str into a pair of pairs:
            conformed_monitors = [pair if isinstance(pair, (list, tuple)) else (pair, pair) for pair in monitors]
            # verify the pair of pairs
            if not all(isinstance(x[0], str) and isinstance(x[1], str) for x in conformed_monitors):
                raise ValueError("{} should be a list of tuples, found: {}".format(monitor_type, monitors))

        return conformed_monitors

    # verify and fix monitoring sections:
    node.monitor_metrics = _verify_monitors(node.monitor_metrics, "monitor_metrics", nested_pairs=True)
    node.monitor_artifacts = _verify_monitors(node.monitor_artifacts, "monitor_artifacts")
    node.monitor_models = _verify_monitors(node.monitor_models, "monitor_models")

    return True
def _verify_dag(self) -> bool:
"""
:return: True iff the pipeline dag is fully accessible and contains no cycles
"""
visited = set()
prev_visited = None
while prev_visited != visited:
prev_visited = copy(visited)
for k, node in list(self._nodes.items()):
if k in visited:
continue
if any(p == node.name for p in node.parents or []):
# node cannot have itself as parent
return False
if not all(p in visited for p in node.parents or []):
continue
visited.add(k)
# return False if we did not cover all the nodes
return not bool(set(self._nodes.keys()) - visited)
def _add_function_step(
self,
name: str,
function: Callable,
function_kwargs: Optional[Dict[str, Any]] = None,
function_return: Optional[List[str]] = None,
project_name: Optional[str] = None,
task_name: Optional[str] = None,
task_type: Optional[str] = None,
auto_connect_frameworks: Optional[dict] = None,
auto_connect_arg_parser: Optional[dict] = None,
packages: Optional[Union[bool, str, Sequence[str]]] = None,
repo: Optional[str] = None,
repo_branch: Optional[str] = None,
repo_commit: Optional[str] = None,
helper_functions: Optional[Sequence[Callable]] = None,
docker: Optional[str] = None,
docker_args: Optional[str] = None,
docker_bash_setup_script: Optional[str] = None,
parents: Optional[Sequence[str]] = None,
execution_queue: Optional[str] = None,
monitor_metrics: Optional[List[Union[Tuple[str, str], Tuple]]] = None,
monitor_artifacts: Optional[List[Union[str, Tuple[str, str]]]] = None,
monitor_models: Optional[List[Union[str, Tuple[str, str]]]] = None,
time_limit: Optional[float] = None,
continue_on_fail: bool = False,
pre_execute_callback: Optional[
Callable[["PipelineController", "PipelineController.Node", dict], bool]
] = None, # noqa
post_execute_callback: Optional[Callable[["PipelineController", "PipelineController.Node"], None]] = None,
# noqa
cache_executed_step: bool = False,
retry_on_failure: Optional[
Union[
int,
Callable[["PipelineController", "PipelineController.Node", int], bool],
]
] = None, # noqa
status_change_callback: Optional[
Callable[["PipelineController", "PipelineController.Node", str], None]
] = None, # noqa
tags: Optional[Union[str, Sequence[str]]] = None,
output_uri: Optional[Union[str, bool]] = None,
draft: Optional[bool] = False,
working_dir: Optional[str] = None,
continue_behaviour: Optional[dict] = None,
stage: Optional[str] = None
) -> bool:
"""
Create a Task from a function, including wrapping the function input arguments
into the hyperparameter section as kwargs, and storing function results as named artifacts
Example:
.. code-block:: py
def mock_func(a=6, b=9):
c = a*b
print(a, b, c)
return c, c**2
create_task_from_function(mock_func, function_return=['mul', 'square'])
Example arguments from other Tasks (artifact):
.. code-block:: py
def mock_func(matrix_np):
c = matrix_np*matrix_np
print(matrix_np, c)
return c
create_task_from_function(
mock_func,
function_kwargs={'matrix_np': 'aabb1122.previous_matrix'},
function_return=['square_matrix']
)
:param name: Unique of the step. For example `stage1`
:param function: A global function to convert into a standalone Task
:param function_kwargs: Optional, provide subset of function arguments and default values to expose.
If not provided automatically take all function arguments & defaults
Optional, pass input arguments to the function from other Tasks's output artifact.
Example argument named `numpy_matrix` from Task ID `aabbcc` artifact name `answer`:
``{'numpy_matrix': 'aabbcc.answer'}``
:param function_return: Provide a list of names for all the results.
If not provided, no results will be stored as artifacts.
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task, if not provided use `name` argument.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param auto_connect_frameworks: Control the frameworks auto connect, see `Task.init` auto_connect_frameworks
:param auto_connect_arg_parser: Control the ArgParser auto connect, see `Task.init` auto_connect_arg_parser
:param packages: Manually specify a list of required packages or a local requirements.txt file.
Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
If not provided, packages are automatically added based on the imports used in the function.
Use `False` to install requirements from "requirements.txt" inside your git repository
:param repo: Optional, specify a repository to attach to the function, when remotely executing.
Allow users to execute the function inside the specified repository, enabling to load modules/script
from a repository Notice the execution work directory will be the repository root folder.
Supports both git repo url link, and local repository path.
Example remote url: 'https://github.com/user/repo.git'
Example local repo copy: './repo' -> will automatically store the remote
repo url and commit ID based on the locally cloned copy
:param repo_branch: Optional, specify the remote repository branch (Ignored, if local repo path is used)
:param repo_commit: Optional, specify the repository commit ID (Ignored, if local repo path is used)
:param helper_functions: Optional, a list of helper functions to make available
for the standalone function Task.
:param docker: Select the docker image to be executed in by the remote session
:param docker_args: Add docker arguments, pass a single string
:param docker_bash_setup_script: Add bash script to be executed
inside the docker before setting up the Task's environment
:param parents: Optional list of parent nodes in the DAG.
The current step in the pipeline will be sent for execution only after all the parent nodes
have been executed successfully.
:param execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the default execution queue, as defined on the class
:param monitor_metrics: Optional, log the step's metrics on the pipeline Task.
Format is a list of pairs metric (title, series) to log:
[(step_metric_title, step_metric_series), ]
Example: [('test', 'accuracy'), ]
Or a list of tuple pairs, to specify a different target metric for to use on the pipeline Task:
[((step_metric_title, step_metric_series), (target_metric_title, target_metric_series)), ]
Example: [[('test', 'accuracy'), ('model', 'accuracy')], ]
:param monitor_artifacts: Optional, log the step's artifacts on the pipeline Task.
Provided a list of artifact names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('processed_data', 'final_processed_data'), ]
Alternatively user can also provide a list of artifacts to monitor
(target artifact name will be the same as original artifact name)
Example: ['processed_data', ]
:param monitor_models: Optional, log the step's output models on the pipeline Task.
Provided a list of model names existing on the step's Task, they will also appear on the Pipeline itself.
Example: [('model_weights', 'final_model_weights'), ]
Alternatively user can also provide a list of models to monitor
(target models name will be the same as original model)
Example: ['model_weights', ]
To select the latest (lexicographic) model use "model_*", or the last created model with just "*"
Example: ['model_weights_*', ]
:param time_limit: Default None, no time limit.
Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
:param continue_on_fail: (Deprecated, use `continue_behaviour` instead).
If True, failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
to the failed step will be skipped. Defaults to False
:param pre_execute_callback: Callback function, called when the step (Task) is created,
and before it is sent for execution. Allows a user to modify the Task before launch.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
``parameters`` are the configuration arguments passed to the ClearmlJob.
If the callback returned value is `False`,
the Node is skipped and so is any node in the DAG that relies on this node.
Notice the `parameters` are already parsed,
e.g. ``${step1.parameters.Args/param}`` is replaced with relevant value.
.. code-block:: py
def step_created_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
parameters, # type: dict
):
pass
:param post_execute_callback: Callback function, called when a step (Task) is completed
and other jobs are executed. Allows a user to modify the Task status after completion.
.. code-block:: py
def step_completed_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
):
pass
:param cache_executed_step: If True, before launching the new step,
after updating with the latest configuration, check if an exact Task with the same parameter/code
was already executed. If it was found, use it instead of launching a new Task.
Default: False, a new cloned copy of base_task is always used.
Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
:param retry_on_failure: Integer (number of retries) or Callback function that returns True to allow a retry
- Integer: In case of node failure, retry the node the number of times indicated by this parameter.
- Callable: A function called on node failure. Takes as parameters:
the PipelineController instance, the PipelineController.Node that failed and an int
representing the number of previous retries for the node that failed
The function must return a `bool`: True if the node should be retried and False otherwise.
If True, the node will be re-queued and the number of retries left will be decremented by 1.
By default, if this callback is not specified, the function will be retried the number of
times indicated by `retry_on_failure`.
.. code-block:: py
def example_retry_on_failure_callback(pipeline, node, retries):
print(node.name, ' failed')
# allow up to 5 retries (total of 6 runs)
return retries < 5
:param status_change_callback: Callback function, called when the status of a step (Task) changes.
Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
The signature of the function must look the following way:
.. code-block:: py
def status_change_callback(
pipeline, # type: PipelineController,
node, # type: PipelineController.Node,
previous_status # type: str
):
pass
:param tags: A list of tags for the specific pipeline step.
When executing a Pipeline remotely
(i.e. launching the pipeline from the UI/enqueuing it), this method has no effect.
:param output_uri: The storage / output url for this step. This is the default location for output
models and other artifacts. Check Task.init reference docs for more info (output_uri is a parameter).
:param draft: (default False). If True, the Task will be created as a draft task.
:param working_dir: Working directory to launch the step from.
:param continue_behaviour: Controls whether the pipeline will continue running after a step failed/was aborted.
Different behaviours can be set using a dictionary of boolean options. Supported options are:
- continue_on_fail - If True, the pipeline will continue even if the step failed.
If False, the pipeline will stop
- continue_on_abort - If True, the pipeline will continue even if the step was aborted.
If False, the pipeline will stop
- skip_children_on_fail - If True, the children of this step will be skipped if it failed.
If False, the children will run even if this step failed.
Any parameters passed from the failed step to its children will default to None
- skip_children_on_abort - If True, the children of this step will be skipped if it was aborted.
If False, the children will run even if this step was aborted.
Any parameters passed from the failed step to its children will default to None
If the keys are not present in the dictionary, their values will default to True
:param stage: Name of the stage. This parameter enables pipeline step grouping into stages
:return: True if successful
"""
# always store callback functions (even when running remotely)
if pre_execute_callback:
self._pre_step_callbacks[name] = pre_execute_callback
if post_execute_callback:
self._post_step_callbacks[name] = post_execute_callback
if status_change_callback:
self._status_change_callbacks[name] = status_change_callback
self._verify_node_name(name)
if output_uri is None:
output_uri = self._output_uri
function_input_artifacts = {}
# go over function_kwargs, split it into string and input artifacts
for k, v in function_kwargs.items():
if v is None:
continue
if self._step_ref_pattern.match(str(v)):
# check for step artifacts
step, _, artifact = v[2:-1].partition(".")
if step in self._nodes and artifact in self._nodes[step].return_artifacts:
function_input_artifacts[k] = "${{{}.id}}.{}".format(step, artifact)
continue
# verify the reference only if we are running locally (on remote when we have multiple
# steps from tasks the _nodes is till empty, only after deserializing we will have the full DAG)
if self._task.running_locally():
self.__verify_step_reference(node=self.Node(name=name), step_ref_string=v)
elif not isinstance(v, (float, int, bool, six.string_types)):
function_input_artifacts[k] = "{}.{}.{}".format(self._task.id, name, k)
self._upload_pipeline_artifact(artifact_name="{}.{}".format(name, k), artifact_object=v)
function_kwargs = {k: v for k, v in function_kwargs.items() if k not in function_input_artifacts}
parameters = {"{}/{}".format(CreateFromFunction.kwargs_section, k): v for k, v in function_kwargs.items()}
if function_input_artifacts:
parameters.update(
{
"{}/{}".format(CreateFromFunction.input_artifact_section, k): str(v)
for k, v in function_input_artifacts.items()
}
)
job_code_section = name
task_name = task_name or name or None
if self._mock_execution:
project_name = project_name or self._get_target_project() or self._task.get_project_name()
task_definition = self._create_task_from_function(
docker,
docker_args,
docker_bash_setup_script,
function,
function_input_artifacts,
function_kwargs,
function_return,
auto_connect_frameworks,
auto_connect_arg_parser,
packages,
project_name,
task_name,
task_type,
repo,
repo_branch,
repo_commit,
helper_functions,
output_uri=output_uri,
working_dir=working_dir,
)
elif self._task.running_locally() or self._task.get_configuration_object(name=name) is None:
project_name = project_name or self._get_target_project() or self._task.get_project_name()
task_definition = self._create_task_from_function(
docker,
docker_args,
docker_bash_setup_script,
function,
function_input_artifacts,
function_kwargs,
function_return,
auto_connect_frameworks,
auto_connect_arg_parser,
packages,
project_name,
task_name,
task_type,
repo,
repo_branch,
repo_commit,
helper_functions,
output_uri=output_uri,
working_dir=working_dir,
)
# update configuration with the task definitions
# noinspection PyProtectedMember
self._task._set_configuration(
name=name,
config_type="json",
config_text=json.dumps(task_definition, indent=1),
)
else:
# load task definition from configuration
# noinspection PyProtectedMember
config_text = self._task._get_configuration_text(name=name)
task_definition = json.loads(config_text) if config_text else dict()
def _create_task(_: Any) -> Task:
a_task = Task.create(
project_name=project_name,
task_name=task_definition.get("name"),
task_type=task_definition.get("type"),
)
# replace reference
a_task.update_task(task_definition)
if tags:
a_task.add_tags(tags)
if output_uri is not None:
a_task.output_uri = output_uri
return a_task
self._nodes[name] = self.Node(
name=name,
base_task_id=None,
parents=parents or [],
queue=execution_queue,
timeout=time_limit,
parameters=parameters,
clone_task=False,
cache_executed_step=cache_executed_step,
task_factory_func=_create_task,
continue_on_fail=continue_on_fail,
return_artifacts=function_return,
monitor_artifacts=monitor_artifacts,
monitor_metrics=monitor_metrics,
monitor_models=monitor_models,
job_code_section=job_code_section,
explicit_docker_image=docker,
output_uri=output_uri,
draft=draft,
continue_behaviour=continue_behaviour,
stage=stage
)
self._retries[name] = 0
self._retries_callbacks[name] = (
retry_on_failure
if callable(retry_on_failure)
else (
functools.partial(
self._default_retry_on_failure_callback,
max_retries=retry_on_failure,
)
if isinstance(retry_on_failure, int)
else self._retry_on_failure_callback
)
)
return True
def _relaunch_node(self, node: "PipelineController.Node") -> None:
if not node.job:
getLogger("clearml.automation.controller").warning(
"Could not relaunch node {} (job object is missing)".format(node.name)
)
return
self._retries[node.name] = self._retries.get(node.name, 0) + 1
getLogger("clearml.automation.controller").warning(
"Node '{}' failed. Retrying... (this is retry number {})".format(node.name, self._retries[node.name])
)
node.job.task.mark_stopped(force=True, status_message=self._relaunch_status_message)
node.job.task.set_progress(0)
node.job.task.get_logger().report_text(
"\nNode '{}' failed. Retrying... (this is retry number {})\n".format(node.name, self._retries[node.name])
)
parsed_queue_name = self._parse_step_ref(node.queue)
node.job.launch(queue_name=parsed_queue_name or self._default_execution_queue)
def _launch_node(self, node: "PipelineController.Node") -> bool:
    """
    Launch a single node (create and enqueue a ClearmlJob)

    Resolves step references in the node parameters, applies pre-launch
    callbacks (which may veto the node), creates the ClearmlJob and either
    marks it cached/draft or enqueues it for execution.

    :param node: Node to launch
    :return: Return True if a new job was launched
    """
    # clear state if we are creating a new job
    if not node.job:
        node.job_started = None
        node.job_ended = None
        node.job_type = None
    if node.job or node.executed:
        print("Skipping cached/executed step [{}]".format(node.name))
        return False
    print("Launching step [{}]".format(node.name))
    # resolve "${step.*}" references in the node parameters into concrete values
    updated_hyper_parameters = {}
    for k, v in node.parameters.items():
        updated_hyper_parameters[k] = self._parse_step_ref(v, recursive=node.recursively_parse_parameters)
    task_overrides = self._parse_task_overrides(node.task_overrides) if node.task_overrides else None
    extra_args = dict()
    extra_args["project"] = self._get_target_project(return_project_id=True) or None
    # set Task name to match job name
    if self._pipeline_as_sub_project():
        extra_args["name"] = node.name
    if node.explicit_docker_image:
        extra_args["explicit_docker_image"] = node.explicit_docker_image
    # pre-launch callback: returning False (explicitly) skips the node
    skip_node = None
    if self._pre_step_callbacks.get(node.name):
        skip_node = self._pre_step_callbacks[node.name](self, node, updated_hyper_parameters)
    if skip_node is False:
        node.skip_job = True
        return True
    task_id = node.base_task_id
    disable_clone_task = not node.clone_task
    task_factory_func_task = None
    if node.task_factory_func:
        # create Task on the fly (function-step nodes); run it as-is, no cloning
        task_factory_func_task = node.task_factory_func(node)
        task_id = task_factory_func_task.id
        disable_clone_task = True
    try:
        node.job = self._clearml_job_class(
            base_task_id=task_id,
            parameter_override=updated_hyper_parameters,
            configuration_overrides=node.configurations,
            tags=["{} {}".format(self._node_tag_prefix, self._task.id)]
            if self._add_pipeline_tags and self._task
            else None,
            parent=self._task.id if self._task else None,
            disable_clone_task=disable_clone_task,
            task_overrides=task_overrides,
            allow_caching=node.cache_executed_step,
            output_uri=node.output_uri,
            enable_local_imports=self._enable_local_imports,
            **extra_args,
        )
    except Exception:
        # job creation failed: mark the whole pipeline failed and propagate
        self._pipeline_task_status_failed = True
        raise
    node.job_started = None
    node.job_ended = None
    node.job_type = str(node.job.task.task_type)
    # post-creation callback may still veto the node (returning False)
    if self._experiment_created_cb:
        skip_node = self._experiment_created_cb(self, node, updated_hyper_parameters)
    if skip_node is False:
        # skipping node
        getLogger("clearml.automation.controller").warning("Skipping node {} on callback request".format(node))
        # delete the job we just created
        node.job.delete()
        node.skip_job = True
    elif node.job.is_cached_task():
        # cache hit: reuse the previously executed Task, discard the factory Task if any
        node.executed = node.job.task_id()
        if task_factory_func_task:
            task_factory_func_task.delete(raise_on_error=False)
        self._running_nodes.append(node.name)
    elif node.draft:
        # draft node: tracked as running but never enqueued
        self._running_nodes.append(node.name)
    else:
        self._running_nodes.append(node.name)
        parsed_queue_name = self._parse_step_ref(node.queue)
        return node.job.launch(queue_name=parsed_queue_name or self._default_execution_queue)
    return True
def _update_execution_plot(self) -> None:
    """
    Update sankey diagram of the current pipeline
    Also update the controller Task artifact storing the DAG state (with all the nodes states)
    """
    if not self._task:
        return
    nodes = list(self._nodes.values())
    self._update_nodes_status()
    # update the configuration state, so that the UI is presents the correct state
    self._force_task_configuration_update()
    # plotly sankey "node" trace: one entry (label/color) per pipeline step
    sankey_node = dict(
        label=[],
        color=[],
        hovertemplate="%{label}<extra></extra>",
        # customdata=[],
        # hovertemplate='%{label}<br />Hyper-Parameters:<br />%{customdata}<extra></extra>',
    )
    # plotly sankey "link" trace: parent->child edges (indices into sankey_node)
    sankey_link = dict(
        source=[],
        target=[],
        value=[],
        # hovertemplate='%{target.label}<extra></extra>',
        hovertemplate="<extra></extra>",
    )
    visited = []
    node_params = []
    # topological walk: only lay out a node after all of its parents were visited;
    # unprocessed nodes are carried over to the next pass
    while nodes:
        next_nodes = []
        for node in nodes:
            if not all(p in visited for p in node.parents or []):
                next_nodes.append(node)
                continue
            visited.append(node.name)
            idx = len(visited) - 1
            parents = [visited.index(p) for p in node.parents or []]
            if node.job and node.job.task_parameter_override is not None:
                node.job.task_parameter_override.update(node.parameters or {})
            node_params.append(
                (
                    node.job.task_parameter_override
                    if node.job and node.job.task_parameter_override
                    else node.parameters
                )
                or {}
            )
            # sankey_node['label'].append(node.name)
            # sankey_node['customdata'].append(
            #     '<br />'.join('{}: {}'.format(k, v) for k, v in (node.parameters or {}).items()))
            # label shows the node name plus its parameters (long values truncated to 24 chars)
            sankey_node["label"].append(
                "{}<br />".format(node.name)
                + "<br />".join(
                    "{}: {}".format(k, v if len(str(v)) < 24 else (str(v)[:24] + " ..."))
                    for k, v in (node.parameters or {}).items()
                )
            )
            sankey_node["color"].append(self._get_node_color(node))
            for p in parents:
                sankey_link["source"].append(p)
                sankey_link["target"].append(idx)
                sankey_link["value"].append(1)
        # if nothing changed, we give up (cycle or missing parent would loop forever)
        if nodes == next_nodes:
            break
        nodes = next_nodes
    # make sure we have no independent (unconnected) nodes
    single_nodes = []
    for i in [n for n in range(len(visited)) if n not in sankey_link["source"] and n not in sankey_link["target"]]:
        single_nodes.append(i)
    # create the sankey graph
    dag_flow = dict(
        link=sankey_link,
        node=sankey_node,
        textfont=dict(color="rgba(0,0,0,0)", size=1),
        type="sankey",
        orientation="h",
    )
    table_values = self._build_table_report(node_params, visited)
    # hack, show single node sankey: render unconnected nodes as a scatter of markers
    if single_nodes:
        singles_flow = dict(
            x=list(range(len(single_nodes))),
            y=[1] * len(single_nodes),
            text=[v for i, v in enumerate(sankey_node["label"]) if i in single_nodes],
            mode="markers",
            hovertemplate="%{text}<extra></extra>",
            marker=dict(
                color=[v for i, v in enumerate(sankey_node["color"]) if i in single_nodes],
                size=[40] * len(single_nodes),
            ),
            showlegend=False,
            type="scatter",
        )
        # only single nodes
        if len(single_nodes) == len(sankey_node["label"]):
            fig = dict(
                data=[singles_flow],
                layout={
                    "hovermode": "closest",
                    "xaxis": {"visible": False},
                    "yaxis": {"visible": False},
                },
            )
        else:
            # split the canvas: sankey on top, unconnected-node scatter below
            dag_flow["domain"] = {"x": [0.0, 1.0], "y": [0.2, 1.0]}
            fig = dict(
                data=[dag_flow, singles_flow],
                layout={
                    "autosize": True,
                    "hovermode": "closest",
                    "xaxis": {
                        "anchor": "y",
                        "domain": [0.0, 1.0],
                        "visible": False,
                    },
                    "yaxis": {
                        "anchor": "x",
                        "domain": [0.0, 0.15],
                        "visible": False,
                    },
                },
            )
    else:
        # create the sankey plot
        fig = dict(
            data=[dag_flow],
            layout={"xaxis": {"visible": False}, "yaxis": {"visible": False}},
        )
    # report DAG
    self._task.get_logger().report_plotly(
        title=self._report_plot_execution_flow["title"],
        series=self._report_plot_execution_flow["series"],
        iteration=0,
        figure=fig,
    )
    # report detailed table
    self._task.get_logger().report_table(
        title=self._report_plot_execution_details["title"],
        series=self._report_plot_execution_details["series"],
        iteration=0,
        table_plot=table_values,
    )
def _build_table_report(self, node_params: List, visited: List) -> List[List]:
    """
    Create the detailed table report on all the jobs in the pipeline.

    :param node_params: list of per-node parameter dictionaries
    :param visited: list of node names, aligned with node_params
    :return: Table as a List of a List of strings (cell); first row is the header
    """
    # build a web-link template with {project}/{task} placeholders for per-node links
    base_link = self._task.get_output_log_web_page()
    task_link_template = base_link.replace("/{}/".format(self._task.project), "/{project}/").replace(
        "/{}/".format(self._task.id), "/{task}/"
    )
    rows = [["Pipeline Step", "Task ID", "Task Name", "Status", "Parameters"]]
    for name, param in zip(visited, node_params):
        node = self._nodes[name]
        param_str = str(param) if param else ""
        if len(param_str) > 3:
            # remove the surrounding {} of the dict repr
            param_str = param_str[1:-1]
        step_name = name
        if node.base_task_id:
            # add a link to the base task the step was cloned from
            step_name += '\n[<a href="{}"> {} </a>]'.format(
                task_link_template.format(project="*", task=node.base_task_id),
                "base task",
            )
        rows.append(
            [
                step_name,
                self.__create_task_link(node, task_link_template),
                node.job.task.name if node.job else "",
                str(node.status or ""),
                param_str,
            ]
        )
    return rows
def _call_retries_callback(self, node: "PipelineController.Node") -> bool:
# if this functions returns True, we should relaunch the node
# if False, don't relaunch
if node.name not in self._retries_callbacks:
return False
try:
return self._retries_callbacks[node.name](self, node, self._retries.get(node.name, 0))
except Exception as e:
getLogger("clearml.automation.controller").warning(
"Failed calling the retry callback for node '{}'. Error is '{}'".format(node.name, e)
)
return False
@classmethod
def _get_node_color(cls, node: "PipelineController.Node") -> str:
# type (self.Mode) -> str
"""
Return the node color based on the node/job state
:param node: A node in the pipeline
:return: string representing the color of the node (e.g. "red", "green", etc)
"""
if not node:
return ""
color_lookup = {
"failed": "red",
"cached": "darkslateblue",
"completed": "blue",
"aborted": "royalblue",
"queued": "#bdf5bd",
"running": "green",
"skipped": "gray",
"pending": "lightsteelblue",
}
return color_lookup.get(node.status, "")
def _update_nodes_status(self) -> None:
    # type () -> None
    """
    Update the status of all nodes in the pipeline.

    Collects every active job and refreshes their backend status in a single
    batched query, then updates each node's status entry (which also fires
    any registered status-change callbacks).
    """
    # copy to avoid race condition with nodes added concurrently
    nodes = self._nodes.copy()
    # fix: the original also built a `previous_status_map` from node.job._last_status
    # here, but never read it - dead code removed
    jobs = [node.job for node in nodes.values() if node.job]
    # single batched backend query instead of one per job
    BaseJob.update_status_batch(jobs)
    for node in nodes.values():
        self._update_node_status(node)
def _update_node_status(self, node: "PipelineController.Node") -> None:
    # type (self.Node) -> None
    """
    Update the node status entry based on the node/job state

    Derives one of: "failed", "cached", "completed", "aborted", "queued",
    "running", "skipped", "pending" - and fires the user status-change
    callback when the status actually changed.

    :param node: A node in the pipeline
    """
    previous_status = node.status
    if node.job and node.job.is_running():
        node.set_job_started()
    # remember whether we still need to stamp the end time once terminal
    update_job_ended = node.job_started and not node.job_ended
    if node.executed is not None:
        # node was already marked executed (note: executed may be False for a
        # failed/aborted step whose children are skipped)
        if node.job and node.job.is_failed():
            # failed job
            node.status = "failed"
        elif node.job and node.job.is_cached_task():
            # cached job
            node.status = "cached"
        elif not node.job or node.job.is_completed():
            # completed job
            node.status = "completed"
        else:
            # aborted job
            node.status = "aborted"
    elif node.job:
        # job exists but the node was not marked executed yet - live status
        if node.job.is_pending():
            # lightgreen, pending in queue
            node.status = "queued"
        elif node.job.is_completed():
            # completed job
            node.status = "completed"
        elif node.job.is_failed():
            # failed job
            node.status = "failed"
        elif node.job.is_stopped():
            # aborted job
            node.status = "aborted"
        else:
            node.status = "running"
    elif node.skip_job:
        node.status = "skipped"
    else:
        node.status = "pending"
    # stamp the job end time the first time we observe a terminal status
    if update_job_ended and node.status in ("aborted", "failed", "completed"):
        node.set_job_ended()
    if (
        previous_status is not None
        and previous_status != node.status
        and self._status_change_callbacks.get(node.name)
    ):
        # a broken user callback must not break the pipeline - log and continue
        # noinspection PyBroadException
        try:
            self._status_change_callbacks[node.name](self, node, previous_status)
        except Exception as e:
            getLogger("clearml.automation.controller").warning(
                "Failed calling the status change callback for node '{}'. Error is '{}'".format(node.name, e)
            )
def _update_dag_state_artifact(self) -> None:
    """
    Store the serialized pipeline DAG state as an artifact on the controller Task.

    The artifact body itself is empty; the DAG hash goes into the artifact
    metadata and the full JSON state into its preview.
    """
    dag_state = self._serialize()
    self._task.upload_artifact(
        name=self._state_artifact_name,
        artifact_object="",
        metadata=dict(pipeline=hash_dict(dag_state)),
        preview=json.dumps(dag_state, indent=1),
    )
def _force_task_configuration_update(self) -> None:
    """
    Push the serialized pipeline DAG into the controller Task's configuration
    section (overwriting any existing value), so the UI shows the current state.
    No-op when there is no controller Task.
    """
    dag_state = self._serialize()
    if not self._task:
        return
    # noinspection PyProtectedMember
    self._task._set_configuration(
        name=self._config_section,
        config_type="dictionary",
        description="pipeline state: {}".format(hash_dict(dag_state)),
        config_text=json.dumps(dag_state, indent=2),
        force=True,
    )
def _update_progress(self) -> None:
"""
Update progress of the pipeline every PipelineController._update_progress_interval seconds.
Progress is calculated as the mean of the progress of each step in the pipeline.
"""
if time() - self._last_progress_update_time < self._update_progress_interval:
return
# copy to avoid race condition
nodes = self._nodes.copy()
job_progress = [(node.job.task.get_progress() or 0) if node.job else 0 for node in nodes.values()]
if len(job_progress):
self._task.set_progress(int(sum(job_progress) / len(job_progress)))
self._last_progress_update_time = time()
def _daemon(self) -> None:
    """
    The main pipeline execution loop. This loop is executed on its own dedicated thread.

    Repeatedly polls running nodes, retries/collects finished ones, launches
    newly-unblocked nodes, and updates the execution plot, until the pipeline
    completes, fails, times out or is externally stopped.

    :return:
    """
    launch_thread_pool = ThreadPool(16)
    pooling_counter = 0
    launched_nodes = set()
    last_monitor_report = last_plot_report = time()
    while self._stop_event:
        # stop request (first iteration uses a short wait so we start immediately)
        if self._stop_event.wait(self._pool_frequency if pooling_counter else 0.01):
            break
        pooling_counter += 1
        # check the pipeline time limit
        if self._pipeline_time_limit and (time() - self._start_time) > self._pipeline_time_limit:
            break
        self._update_progress()
        self._update_nodes_status()
        # check the state of all current jobs
        # if no job ended, continue to the next poll
        completed_jobs = []
        force_execution_plot_update = False
        nodes_failed_stop_pipeline = []
        for j in self._running_nodes:
            node = self._nodes[j]
            if not node.job:
                continue
            if node.job.is_stopped(aborted_nonresponsive_as_running=True):
                node_failed = node.job.is_failed()
                if node_failed:
                    # give the retry callback a chance to relaunch before declaring failure
                    if self._call_retries_callback(node):
                        self._relaunch_node(node)
                        continue
                    else:
                        self._final_failure[node.name] = True
                completed_jobs.append(j)
                # executed=False (vs a task id) tells the scheduler to skip the children
                if node.job.is_aborted():
                    node.executed = node.job.task_id() if not node.skip_children_on_abort else False
                elif node_failed:
                    node.executed = node.job.task_id() if not node.skip_children_on_fail else False
                else:
                    node.executed = node.job.task_id()
                if j in launched_nodes:
                    launched_nodes.remove(j)
                # check if we need to stop all running steps
                if node_failed and self._abort_running_steps_on_failure and not node.continue_on_fail:
                    nodes_failed_stop_pipeline.append(node.name)
            elif node.timeout:
                # per-node time limit: abort the job once exceeded
                started = node.job.task.data.started
                if (datetime.now().astimezone(started.tzinfo) - started).total_seconds() > node.timeout:
                    node.job.abort()
                    completed_jobs.append(j)
                    node.executed = node.job.task_id()
            elif j in launched_nodes and node.job.is_running():
                # make sure update the execution graph when the job started running
                # (otherwise it will still be marked queued)
                launched_nodes.remove(j)
                force_execution_plot_update = True
        # update running jobs
        self._running_nodes = [j for j in self._running_nodes if j not in completed_jobs]
        # nothing changed, we can sleep
        if not completed_jobs and self._running_nodes:
            # force updating the pipeline state (plot) at least every 5 min.
            if force_execution_plot_update or time() - last_plot_report > self._update_execution_plot_interval:
                last_plot_report = time()
                last_monitor_report = time()
                self.update_execution_plot()
            elif time() - last_monitor_report > self._monitor_node_interval:
                last_monitor_report = time()
                self._scan_monitored_nodes()
            continue
        # callback on completed jobs
        if self._experiment_completed_cb or self._post_step_callbacks:
            for job in completed_jobs:
                job_node = self._nodes.get(job)
                if not job_node:
                    continue
                if self._experiment_completed_cb:
                    self._experiment_completed_cb(self, job_node)
                if self._post_step_callbacks.get(job_node.name):
                    self._post_step_callbacks[job_node.name](self, job_node)
        # check if we need to stop the pipeline, and abort all running steps
        if nodes_failed_stop_pipeline:
            print(
                "Aborting pipeline and stopping all running steps, node {} failed".format(
                    nodes_failed_stop_pipeline
                )
            )
            break
        # Pull the next jobs in the pipeline, based on the completed list
        next_nodes = []
        for node in list(self._nodes.values()):
            # check if already processed or needs to be skipped
            if node.job or node.executed or node.skip_job:
                continue
            completed_parents = [bool(p in self._nodes and self._nodes[p].executed) for p in node.parents or []]
            if all(completed_parents):
                next_nodes.append(node.name)
        # update the execution graph
        print("Launching the next {} steps".format(len(next_nodes)))
        # launch all ready nodes in parallel on the thread pool
        node_launch_success = launch_thread_pool.map(self._launch_node, [self._nodes[name] for name in next_nodes])
        for name, success in zip(next_nodes, node_launch_success):
            if success and not self._nodes[name].skip_job:
                if self._nodes[name].job and self._nodes[name].job.task_parameter_override is not None:
                    self._nodes[name].job.task_parameter_override.update(self._nodes[name].parameters or {})
                print("Launching step: {}".format(name))
                print(
                    "Parameters:\n{}".format(
                        self._nodes[name].job.task_parameter_override
                        if self._nodes[name].job
                        else self._nodes[name].parameters
                    )
                )
                print("Configurations:\n{}".format(self._nodes[name].configurations))
                print("Overrides:\n{}".format(self._nodes[name].task_overrides))
                launched_nodes.add(name)
                # check if node is cached do not wait for event but run the loop again
                if self._nodes[name].executed:
                    pooling_counter = 0
            else:
                getLogger("clearml.automation.controller").warning(
                    "Skipping launching step '{}': {}".format(name, self._nodes[name])
                )
        # update current state (in configuration, so that we could later continue an aborted pipeline)
        # visualize pipeline state (plot)
        self.update_execution_plot()
        # quit if all pipelines nodes are fully executed.
        if not next_nodes and not self._running_nodes:
            break
    # stop all currently running jobs:
    for node in list(self._nodes.values()):
        if node.executed is False and not node.continue_on_fail:
            # a node that explicitly failed marks the whole pipeline failed
            self._pipeline_task_status_failed = True
        if node.job and not node.job.is_stopped():
            node.job.abort()
        elif not node.job and not node.executed:
            # mark Node as skipped if it has no Job object and it is not executed
            node.skip_job = True
    # visualize pipeline state (plot)
    self.update_execution_plot()
    if node._stop_event if False else self._stop_event:
        # noinspection PyBroadException
        try:
            self._stop_event.set()
        except Exception:
            pass
def _parse_step_ref(self, value: Any, recursive: bool = False) -> Optional[str]:
"""
Return the step reference. For example ``"${step1.parameters.Args/param}"``
:param value: string
:param recursive: if True, recursively parse all values in the dict, list or tuple
:return:
"""
# look for all the step references
pattern = self._step_ref_pattern
updated_value = value
if isinstance(value, str):
for g in pattern.findall(value):
# update with actual value
new_val = self.__parse_step_reference(g)
if not isinstance(new_val, six.string_types):
return new_val
updated_value = updated_value.replace(g, new_val, 1)
# if we have a dict, list or tuple, we need to recursively update the values
if recursive:
if isinstance(value, dict):
updated_value = {}
for k, v in value.items():
updated_value[k] = self._parse_step_ref(v, recursive=True)
elif isinstance(value, list):
updated_value = [self._parse_step_ref(v, recursive=True) for v in value]
elif isinstance(value, tuple):
updated_value = tuple(self._parse_step_ref(v, recursive=True) for v in value)
return updated_value
def _parse_task_overrides(self, task_overrides: dict) -> dict:
"""
Return the step reference. For example ``"${step1.parameters.Args/param}"``
:param task_overrides: string
:return:
"""
updated_overrides = {}
for k, v in task_overrides.items():
updated_overrides[k] = self._parse_step_ref(v)
return updated_overrides
def _verify_node_name(self, name: str) -> None:
if name in self._nodes:
raise ValueError("Node named '{}' already exists in the pipeline dag".format(name))
if name in self._reserved_pipeline_names:
raise ValueError("Node named '{}' is a reserved keyword, use a different name".format(name))
def _scan_monitored_nodes(self) -> None:
"""
Scan all nodes and monitor their metrics/artifacts/models
"""
for node in list(self._nodes.values()):
self._monitor_node(node)
def _monitor_node(self, node: "PipelineController.Node") -> None:
    """
    If Node is running, put the metrics from the node on the pipeline itself.

    Copies monitored scalar metrics, artifacts and output models from the
    node's Task onto the pipeline controller Task, then marks the node as
    fully scanned once its job has stopped.

    :param node: Node to test
    """
    if not node:
        return
    # verify we have the node
    if node.name not in self._monitored_nodes:
        self._monitored_nodes[node.name] = {}
    # if we are done with this node, skip it
    if self._monitored_nodes[node.name].get("completed"):
        return
    # resolve the Task to read the reported data from:
    # either the live job's Task, or the already-executed Task by id
    if node.job and node.job.task:
        task = node.job.task
    elif node.job and node.executed and isinstance(node.executed, str):
        task = Task.get_task(task_id=node.executed)
    else:
        return
    # update the metrics
    if node.monitor_metrics:
        metrics_state = self._monitored_nodes[node.name].get("metrics", {})
        logger = self._task.get_logger()
        scalars = task.get_reported_scalars(x_axis="iter")
        # monitor_metrics maps (source title, series) -> (target title, series)
        for (s_title, s_series), (t_title, t_series) in node.monitor_metrics:
            values = scalars.get(s_title, {}).get(s_series)
            if values and values.get("x") is not None and values.get("y") is not None:
                x = values["x"][-1]
                y = values["y"][-1]
                last_y = metrics_state.get(s_title, {}).get(s_series)
                # only re-report when the latest value exceeds the last reported one
                if last_y is None or y > last_y:
                    logger.report_scalar(title=t_title, series=t_series, value=y, iteration=int(x))
                    last_y = y
                if not metrics_state.get(s_title):
                    metrics_state[s_title] = {}
                metrics_state[s_title][s_series] = last_y
        self._monitored_nodes[node.name]["metrics"] = metrics_state
    if node.monitor_artifacts:
        task.reload()
        artifacts = task.data.execution.artifacts
        self._task.reload()
        output_artifacts = []
        # monitor_artifacts maps source artifact name -> target artifact name
        for s_artifact, t_artifact in node.monitor_artifacts:
            # find artifact
            for a in artifacts:
                if a.key != s_artifact:
                    continue
                new_a = copy(a)
                new_a.key = t_artifact
                output_artifacts.append(new_a)
                break
        # update artifacts directly on the Task
        if output_artifacts:
            # noinspection PyProtectedMember
            self._task._add_artifacts(output_artifacts)
    if node.monitor_models:
        task.reload()
        output_models = task.data.models.output
        self._task.reload()
        target_models = []
        # monitor_models maps source model name -> target model name
        for s_model, t_model in node.monitor_models:
            # find artifact
            for a in output_models:
                if a.name != s_model:
                    continue
                new_a = copy(a)
                new_a.name = t_model
                target_models.append(new_a)
                break
        # update artifacts directly on the Task
        if target_models:
            self._task.reload()
            models = self._task.data.models
            # replace any previous entries with the same names, then append
            keys = [a.name for a in target_models]
            models.output = [a for a in models.output or [] if a.name not in keys] + target_models
            # noinspection PyProtectedMember
            self._task._edit(models=models)
    # update the state (so that we do not scan the node twice)
    if node.job.is_stopped(aborted_nonresponsive_as_running=True):
        self._monitored_nodes[node.name]["completed"] = True
def _get_target_project(self, return_project_id: bool = False) -> str:
"""
return the pipeline components target folder name/id
:param return_project_id: if False (default), return target folder name. If True, return project id
:return: project id/name (None if not valid)
"""
if not self._target_project:
return ""
if str(self._target_project).lower().strip() == "true":
if not self._task:
return ""
return self._task.project if return_project_id else self._task.get_project_name()
if not return_project_id:
return self._target_project
return get_or_create_project(
session=self._task.session if self._task else Task.default_session,
project_name=self._target_project,
)
@classmethod
def _add_pipeline_name_run_number(cls, task: Task) -> None:
    """
    Append an auto-incremented " #<num>" run-number suffix to the pipeline Task name.

    Counts previously created pipelines with the same base name (same project,
    pipeline system tag) and renames the task accordingly. Best-effort: any
    query failure only skips the auto-increment, it never stops the pipeline.

    :param task: The pipeline controller Task to rename (no-op if None)
    """
    if not task:
        return
    # if we were already executed, do not rename (meaning aborted pipeline that was continued)
    # noinspection PyProtectedMember
    if task._get_runtime_properties().get(cls._runtime_property_hash):
        return
    # remove the #<num> suffix if we have one:
    task_name = re.compile(r" #\d+$").split(task.name or "", 1)[0]
    page_size = 100
    # find exact name or " #<num>" extension
    prev_pipelines_ids = task.query_tasks(
        task_name=r"^{}(| #\d+)$".format(task_name),
        task_filter=dict(
            project=[task.project],
            system_tags=[cls._tag],
            order_by=["-created"],
            page_size=page_size,
            fetch_only_first_page=True,
        ),
    )
    # default run number: the count of matching previous pipelines
    max_value = len(prev_pipelines_ids) if prev_pipelines_ids else 0
    # we hit the limit (count is capped by pagination, so derive the number from names)
    if max_value == page_size:
        # make sure that if we get something wrong we do not stop the pipeline,
        # worst case fail to auto increment
        try:
            # we assume we are the latest so let's take a few (last 10) and check the max number
            last_task_name: List[Dict] = task.query_tasks(
                task_filter=dict(task_ids=prev_pipelines_ids[:10], project=[task.project]),
                additional_return_fields=["name"],
            )
            # let's parse the names
            pattern = re.compile(r" #(?P<key>\d+)$")
            task_parts = [pattern.split(t.get("name") or "", 1) for t in last_task_name]
            # find the highest number
            for parts in task_parts:
                if len(parts) >= 2:
                    try:
                        max_value = max(max_value, int(parts[1]) + 1)
                    except (TypeError, ValueError):
                        pass
        except Exception as ex:
            getLogger("clearml.automation.controller").warning(
                "Pipeline auto run increment failed (skipping): {}".format(ex)
            )
            max_value = 0
    # only rename starting from the second run of the same-named pipeline
    if max_value > 1:
        task.set_name(task_name + " #{}".format(max_value))
@classmethod
def _get_pipeline_task(cls) -> Task:
"""
Return the pipeline Task (either the current one, or the parent Task of the currently running Task)
Raise ValueError if we could not locate the pipeline Task
:return: Pipeline Task
"""
# get main Task.
task = Task.current_task()
if str(task.task_type) == str(Task.TaskTypes.controller) and cls._tag in task.get_system_tags():
return task
# get the parent Task, it should be the pipeline
if not task.parent:
raise ValueError("Could not locate parent Pipeline Task")
parent = Task.get_task(task_id=task.parent)
if str(parent.task_type) == str(Task.TaskTypes.controller) and cls._tag in parent.get_system_tags():
return parent
raise ValueError("Could not locate parent Pipeline Task")
def __verify_step_reference(self, node: "PipelineController.Node", step_ref_string: str) -> Optional[str]:
"""
Verify the step reference. For example ``"${step1.parameters.Args/param}"``
Raise ValueError on misconfiguration
:param Node node: calling reference node (used for logging)
:param str step_ref_string: For example ``"${step1.parameters.Args/param}"``
:return: If step reference is used, return the pipeline step name, otherwise return None
"""
parts = step_ref_string[2:-1].split(".")
v = step_ref_string
if len(parts) < 2:
raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))
prev_step = parts[0]
input_type = parts[1]
# check if we reference the pipeline arguments themselves
if prev_step == self._pipeline_step_ref:
if input_type not in self._pipeline_args:
raise ValueError("Node '{}', parameter '{}', step name '{}' is invalid".format(node.name, v, prev_step))
return None
if prev_step not in self._nodes:
raise ValueError("Node '{}', parameter '{}', step name '{}' is invalid".format(node.name, v, prev_step))
if input_type not in ("artifacts", "parameters", "models", "id"):
raise ValueError("Node {}, parameter '{}', input type '{}' is invalid".format(node.name, v, input_type))
if input_type != "id" and len(parts) < 3:
raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))
if input_type == "models":
try:
model_type = parts[2].lower()
except Exception:
raise ValueError(
"Node '{}', parameter '{}', input type '{}', model_type is missing {}".format(
node.name, v, input_type, parts
)
)
if model_type not in ("input", "output"):
raise ValueError(
"Node '{}', parameter '{}', input type '{}', "
"model_type is invalid (input/output) found {}".format(node.name, v, input_type, model_type)
)
if len(parts) < 4:
raise ValueError(
"Node '{}', parameter '{}', input type '{}', model index is missing".format(
node.name, v, input_type
)
)
# check casting
try:
int(parts[3])
except Exception:
raise ValueError(
"Node '{}', parameter '{}', input type '{}', model index is missing {}".format(
node.name, v, input_type, parts
)
)
if len(parts) < 5:
raise ValueError(
"Node '{}', parameter '{}', input type '{}', model property is missing".format(
node.name, v, input_type
)
)
if not hasattr(BaseModel, parts[4]):
raise ValueError(
"Node '{}', parameter '{}', input type '{}', model property is invalid {}".format(
node.name, v, input_type, parts[4]
)
)
return prev_step
def __parse_step_reference(self, step_ref_string: str) -> Optional[str]:
"""
return the adjusted value for "${step...}"
:param step_ref_string: reference string of the form ${step_name.type.value}"
:return: str with value
"""
parts = step_ref_string[2:-1].split(".")
if len(parts) < 2:
raise ValueError("Could not parse reference '{}'".format(step_ref_string))
prev_step = parts[0]
input_type = parts[1].lower()
# check if we reference the pipeline arguments themselves
if prev_step == self._pipeline_step_ref:
if parts[1] not in self._pipeline_args:
raise ValueError(
"Could not parse reference '{}', "
"pipeline argument '{}' could not be found".format(step_ref_string, parts[1])
)
return self._pipeline_args[parts[1]]
if prev_step not in self._nodes or (
not self._nodes[prev_step].job
and not self._nodes[prev_step].executed
and not self._nodes[prev_step].base_task_id
):
raise ValueError(
"Could not parse reference '{}', step '{}' could not be found".format(step_ref_string, prev_step)
)
if input_type not in (
"artifacts",
"parameters",
"models",
"id",
"script",
"execution",
"container",
"output",
"comment",
"models",
"tags",
"system_tags",
"project",
):
raise ValueError("Could not parse reference '{}', type '{}' not valid".format(step_ref_string, input_type))
if input_type != "id" and len(parts) < 3:
raise ValueError("Could not parse reference '{}', missing fields in '{}'".format(step_ref_string, parts))
task = (
self._nodes[prev_step].job.task
if self._nodes[prev_step].job
else Task.get_task(task_id=self._nodes[prev_step].executed or self._nodes[prev_step].base_task_id)
)
task.reload()
if input_type == "artifacts":
# fix \. to use . in artifacts
artifact_path = (".".join(parts[2:])).replace("\\.", "\\_dot_\\")
artifact_path = artifact_path.split(".")
obj = task.artifacts
for p in artifact_path:
p = p.replace("\\_dot_\\", ".")
if isinstance(obj, dict):
obj = obj.get(p)
elif hasattr(obj, p):
obj = getattr(obj, p)
else:
raise ValueError(
"Could not locate artifact {} on previous step {}".format(".".join(parts[1:]), prev_step)
)
return str(obj)
elif input_type == "parameters":
step_params = task.get_parameters()
param_name = ".".join(parts[2:])
if param_name not in step_params:
raise ValueError(
"Could not locate parameter {} on previous step {}".format(".".join(parts[1:]), prev_step)
)
return step_params.get(param_name)
elif input_type == "models":
model_type = parts[2].lower()
if model_type not in ("input", "output"):
raise ValueError("Could not locate model {} on previous step {}".format(".".join(parts[1:]), prev_step))
try:
model_idx = int(parts[3])
model = task.models[model_type][model_idx]
except Exception:
raise ValueError(
"Could not locate model {} on previous step {}, index {} is invalid".format(
".".join(parts[1:]), prev_step, parts[3]
)
)
return str(getattr(model, parts[4]))
elif input_type == "id":
return task.id
elif input_type in (
"script",
"execution",
"container",
"output",
"comment",
"models",
"tags",
"system_tags",
"project",
):
# noinspection PyProtectedMember
return task._get_task_property(".".join(parts[1:]))
return None
@classmethod
def __create_task_link(cls, a_node: "PipelineController.Node", task_link_template: str) -> str:
if not a_node:
return ""
# create the detailed parameter table
task_id = project_id = None
if a_node.job:
project_id = a_node.job.task.project
task_id = a_node.job.task.id
elif a_node.executed:
task_id = a_node.executed
if cls._task_project_lookup.get(task_id):
project_id = cls._task_project_lookup[task_id]
else:
# noinspection PyBroadException
try:
project_id = Task.get_task(task_id=task_id).project
except Exception:
project_id = "*"
cls._task_project_lookup[task_id] = project_id
if not task_id:
return ""
return '<a href="{}"> {} </a>'.format(task_link_template.format(project=project_id, task=task_id), task_id)
def _default_retry_on_failure_callback(
self,
_pipeline_controller: "PipelineController",
_node: "PipelineController.Node",
retries: int,
max_retries: Optional[int] = None,
) -> bool:
return retries < (self._def_max_retry_on_failure if max_retries is None else max_retries)
def _upload_pipeline_artifact(self, artifact_name: str, artifact_object: Any) -> None:
self._task.upload_artifact(
name=artifact_name,
artifact_object=artifact_object,
wait_on_upload=True,
extension_name=(
".pkl" if isinstance(artifact_object, dict) and not self._artifact_serialization_function else None
),
serialization_function=self._artifact_serialization_function,
)
| PipelineController |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/json.py | {
"start": 1264,
"end": 2510
} | class ____(sqltypes.JSON.JSONPathType):
def _processor(
self, dialect: Dialect, super_proc: Optional[Callable[[Any], Any]]
) -> Callable[[Any], Any]:
def process(value: Any) -> Any:
if isinstance(value, str):
# If it's already a string assume that it's in json path
# format. This allows using cast with json paths literals
return value
elif value:
# If it's already a string assume that it's in json path
# format. This allows using cast with json paths literals
value = "{%s}" % (", ".join(map(str, value)))
else:
value = "{}"
if super_proc:
value = super_proc(value)
return value
return process
def bind_processor(self, dialect: Dialect) -> _BindProcessorType[Any]:
return self._processor(dialect, self.string_bind_processor(dialect)) # type: ignore[return-value] # noqa: E501
def literal_processor(
self, dialect: Dialect
) -> _LiteralProcessorType[Any]:
return self._processor(dialect, self.string_literal_processor(dialect)) # type: ignore[return-value] # noqa: E501
| JSONPathType |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 34545,
"end": 35847
} | class ____(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
| Sky2Pix_ConicOrthomorphic |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 26284,
"end": 27965
} | class ____(Reduction):
_parameters = [
"frame",
"meta",
"chunk_kwargs",
"aggregate_kwargs",
"combine_kwargs",
"split_every",
"token",
]
@functools.cached_property
def _name(self):
name = self.operand("token") or funcname(type(self)).lower()
return name + "-" + self.deterministic_token
@classmethod
def chunk(cls, df, **kwargs):
func = kwargs.pop("func")
out = func(df, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return out.to_frame().T if is_series_like(out) else out
@classmethod
def combine(cls, inputs: list, **kwargs):
func = kwargs.pop("func")
df = _concat(inputs)
out = func(df, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return out.to_frame().T if is_series_like(out) else out
@classmethod
def aggregate(cls, inputs, **kwargs):
func = kwargs.pop("func")
df = _concat(inputs)
return func(df, **kwargs)
@functools.cached_property
def _meta(self):
if self.operand("meta") is not no_default:
return self.operand("meta")
return super()._meta
@property
def chunk_kwargs(self):
return self.operand("chunk_kwargs")
@property
def combine_kwargs(self):
return self.operand("combine_kwargs")
@property
def aggregate_kwargs(self):
return self.operand("aggregate_kwargs")
def _simplify_up(self, parent, dependents):
return
def _divisions(self):
return (None, None)
| CustomReduction |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/summary_ops/summary_v1_tensor_op_test.py | {
"start": 1098,
"end": 5943
} | class ____(test.TestCase):
def _SummarySingleValue(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testTags(self):
with self.cached_session() as sess:
c = constant_op.constant(1)
s1 = summary_lib.tensor_summary("s1", c)
with ops.name_scope("foo", skip_on_eager=False):
s2 = summary_lib.tensor_summary("s2", c)
with ops.name_scope("zod", skip_on_eager=False):
s3 = summary_lib.tensor_summary("s3", c)
s4 = summary_lib.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = self.evaluate([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.tag, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.tag, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.tag, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.tag, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.cached_session() as sess:
const = constant_op.constant(10.0)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = b"foobar"
with self.cached_session() as sess:
const = constant_op.constant(s)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.cached_session() as sess:
const = array_ops.ones([5, 5, 5])
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[b"foo bar", b"baz"], [b"zoink", b"zod"]]
with self.cached_session() as sess:
const = constant_op.constant(strings)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.cached_session() as sess:
const = constant_op.constant(bools)
summ = summary_lib.tensor_summary("foo", const)
result = self.evaluate(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
def testSummaryDescriptionAndDisplayName(self):
with self.cached_session() as sess:
def get_description(summary_op):
summ_str = self.evaluate(summary_op)
summ = summary_pb2.Summary()
summ.ParseFromString(summ_str)
return summ.value[0].metadata
const = constant_op.constant(1)
# Default case; no description or display name
simple_summary = summary_lib.tensor_summary("simple", const)
descr = get_description(simple_summary)
self.assertEqual(descr.display_name, "")
self.assertEqual(descr.summary_description, "")
# Values are provided via function args
with_values = summary_lib.tensor_summary(
"simple",
const,
display_name="my name",
summary_description="my description")
descr = get_description(with_values)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# Values are provided via the SummaryMetadata arg
metadata = summary_pb2.SummaryMetadata()
metadata.display_name = "my name"
metadata.summary_description = "my description"
with_metadata = summary_lib.tensor_summary(
"simple", const, summary_metadata=metadata)
descr = get_description(with_metadata)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# If both SummaryMetadata and explicit args are provided, the args win
overwrite = summary_lib.tensor_summary(
"simple",
const,
summary_metadata=metadata,
display_name="overwritten",
summary_description="overwritten")
descr = get_description(overwrite)
self.assertEqual(descr.display_name, "overwritten")
self.assertEqual(descr.summary_description, "overwritten")
if __name__ == "__main__":
test.main()
| SummaryV1TensorOpTest |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 72088,
"end": 73512
} | class ____(FunctionPass):
"""Perform SSA-reconstruction
Produces minimal SSA.
"""
_name = "reconstruct_ssa"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
state.func_ir = reconstruct_ssa(state.func_ir)
self._patch_locals(state)
# Rebuild definitions
state.func_ir._definitions = build_definitions(state.func_ir.blocks)
# Rerun postprocessor to update metadata
# example generator_info
post_proc = postproc.PostProcessor(state.func_ir)
post_proc.run(emit_dels=False)
if config.DEBUG or config.DUMP_SSA:
name = state.func_ir.func_id.func_qualname
print(f"SSA IR DUMP: {name}".center(80, "-"))
state.func_ir.dump()
return True # XXX detect if it actually got changed
def _patch_locals(self, state):
# Fix dispatcher locals dictionary type annotation
locals_dict = state.get('locals')
if locals_dict is None:
return
first_blk, *_ = state.func_ir.blocks.values()
scope = first_blk.scope
for parent, redefs in scope.var_redefinitions.items():
if parent in locals_dict:
typ = locals_dict[parent]
for derived in redefs:
locals_dict[derived] = typ
@register_pass(mutates_CFG=False, analysis_only=False)
| ReconstructSSA |
python | pallets__flask | src/flask/cli.py | {
"start": 26527,
"end": 29098
} | class ____(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self) -> None:
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(
self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
) -> t.Any:
try:
import ssl
except ImportError:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
) from None
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
) from None
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx: click.Context, param: click.Parameter, value: t.Any) -> t.Any:
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
try:
import ssl
except ImportError:
is_context = False
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key" is not used.',
ctx,
param,
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
| CertParamType |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/handlers/rendering.py | {
"start": 5078,
"end": 7298
} | class ____(Treeprocessor):
"""Prepend the configured prefix to IDs of all HTML elements."""
name: str = "mkdocstrings_ids"
"""The name of the treeprocessor."""
id_prefix: str
"""The prefix to add to every ID. It is prepended without any separator; specify your own separator if needed."""
def __init__(self, md: Markdown, id_prefix: str):
"""Initialize the object.
Arguments:
md: A `markdown.Markdown` instance.
id_prefix: The prefix to add to every ID. It is prepended without any separator.
"""
super().__init__(md)
self.id_prefix = id_prefix
def run(self, root: Element) -> None:
"""Prepend the configured prefix to all IDs in the document."""
if self.id_prefix:
self._prefix_ids(root)
def _prefix_ids(self, root: Element) -> None:
index = len(root)
for el in reversed(root): # Reversed mainly for the ability to mutate during iteration.
index -= 1
self._prefix_ids(el)
href_attr = el.get("href")
if id_attr := el.get("id"):
if el.tag == "a" and not href_attr:
# An anchor with id and no href is used by autorefs:
# leave it untouched and insert a copy with updated id after it.
new_el = copy.deepcopy(el)
new_el.set("id", self.id_prefix + id_attr)
root.insert(index + 1, new_el)
else:
# Anchors with id and href are not used by autorefs:
# update in place.
el.set("id", self.id_prefix + id_attr)
# Always update hrefs, names and labels-for:
# there will always be a corresponding id.
if href_attr and href_attr.startswith("#"):
el.set("href", "#" + self.id_prefix + href_attr[1:])
if name_attr := el.get("name"):
el.set("name", self.id_prefix + name_attr)
if el.tag == "label":
for_attr = el.get("for")
if for_attr:
el.set("for", self.id_prefix + for_attr)
| IdPrependingTreeprocessor |
python | bottlepy__bottle | bottle.py | {
"start": 127198,
"end": 131509
} | class ____:
def __init__(self, buffer_size=2 ** 16, memfile_limit=2 ** 18, charset="latin1"):
self.headerlist = []
self.headers = None
self.file = False
self.size = 0
self._buf = b""
self.disposition = None
self.name = None
self.filename = None
self.content_type = None
self.charset = charset
self.memfile_limit = memfile_limit
self.buffer_size = buffer_size
def feed(self, line, nl=""):
if self.file:
return self.write_body(line, nl)
return self.write_header(line, nl)
def write_header(self, line, nl):
line = str(line, self.charset)
if not nl:
raise MultipartError("Unexpected end of line in header.")
if not line.strip(): # blank line -> end of header segment
self.finish_header()
elif line[0] in " \t" and self.headerlist:
name, value = self.headerlist.pop()
self.headerlist.append((name, value + line.strip()))
else:
if ":" not in line:
raise MultipartError("Syntax error in header: No colon.")
name, value = line.split(":", 1)
self.headerlist.append((name.strip(), value.strip()))
def write_body(self, line, nl):
if not line and not nl:
return # This does not even flush the buffer
self.size += len(line) + len(self._buf)
self.file.write(self._buf + line)
self._buf = nl
if self.content_length > 0 and self.size > self.content_length:
raise MultipartError("Size of body exceeds Content-Length header.")
if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
self.file, old = NamedTemporaryFile(mode="w+b"), self.file
old.seek(0)
copied, maxcopy, chunksize = 0, self.size, self.buffer_size
read, write = old.read, self.file.write
while copied < maxcopy:
chunk = read(min(chunksize, maxcopy - copied))
write(chunk)
copied += len(chunk)
def finish_header(self):
self.file = BytesIO()
self.headers = HeaderDict(self.headerlist)
content_disposition = self.headers.get("Content-Disposition")
content_type = self.headers.get("Content-Type")
if not content_disposition:
raise MultipartError("Content-Disposition header is missing.")
self.disposition, self.options = _parse_http_header(content_disposition)[0]
self.name = self.options.get("name")
if "filename" in self.options:
self.filename = self.options.get("filename")
if self.filename[1:3] == ":\\" or self.filename[:2] == "\\\\":
self.filename = self.filename.split("\\")[-1] # ie6 bug
self.content_type, options = _parse_http_header(content_type)[0] if content_type else (None, {})
self.charset = options.get("charset") or self.charset
self.content_length = int(self.headers.get("Content-Length", "-1"))
def finish(self):
if not self.file:
raise MultipartError("Incomplete part: Header section not closed.")
self.file.seek(0)
def is_buffered(self):
""" Return true if the data is fully buffered in memory."""
return isinstance(self.file, BytesIO)
@property
def value(self):
""" Data decoded with the specified charset """
return str(self.raw, self.charset)
@property
def raw(self):
""" Data without decoding """
pos = self.file.tell()
self.file.seek(0)
try:
return self.file.read()
finally:
self.file.seek(pos)
def close(self):
if self.file:
self.file.close()
self.file = False
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
| _MultipartPart |
python | keras-team__keras | keras/src/backend/openvino/export.py | {
"start": 0,
"end": 360
} | class ____:
def track(self, resource):
raise NotImplementedError(
"`track` is not implemented in the openvino backend."
)
def add_endpoint(self, name, fn, input_signature=None, **kwargs):
raise NotImplementedError(
"`add_endpoint` is not implemented in the openvino backend."
)
| OpenvinoExportArchive |
python | kamyu104__LeetCode-Solutions | Python/maximum-of-minimum-values-in-all-subarrays.py | {
"start": 29,
"end": 898
} | class ____(object):
def findMaximums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def find_bound(nums, direction, init):
result = [0]*len(nums)
stk = [init]
for i in direction(xrange(len(nums))):
while stk[-1] != init and nums[stk[-1]] >= nums[i]:
stk.pop()
result[i] = stk[-1]
stk.append(i)
return result
left = find_bound(nums, lambda x: x, -1)
right = find_bound(nums, reversed, len(nums))
result = [-1]*len(nums)
for i, v in enumerate(nums):
result[((right[i]-1)-left[i])-1] = max(result[((right[i]-1)-left[i])-1], v)
for i in reversed(xrange(len(nums)-1)):
result[i] = max(result[i], result[i+1])
return result
| Solution |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 20444,
"end": 20836
} | class ____(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
history = HistoricalRecords()
# Clear the SIMPLE_HISTORY_HISTORY_ID_USE_UUID
delattr(settings, "SIMPLE_HISTORY_HISTORY_ID_USE_UUID")
# Set the SIMPLE_HISTORY_HISTORY_CHANGE_REASON_FIELD
setattr(settings, "SIMPLE_HISTORY_HISTORY_CHANGE_REASON_USE_TEXT_FIELD", True)
| UUIDDefaultModel |
python | dask__distributed | distributed/protocol/tests/test_pickle.py | {
"start": 493,
"end": 6392
} | class ____:
def __init__(self, mv):
self.mv = memoryview(mv)
def __reduce_ex__(self, protocol):
if protocol >= 5:
return MemoryviewHolder, (pickle.PickleBuffer(self.mv),)
else:
return MemoryviewHolder, (self.mv.tobytes(),)
@pytest.mark.parametrize("protocol", range(4, HIGHEST_PROTOCOL + 1))
def test_pickle_data(protocol):
context = {"pickle-protocol": protocol}
data = [1, b"123", "123", [123], {}, set()]
for d in data:
assert loads(dumps(d, protocol=protocol)) == d
assert deserialize(*serialize(d, serializers=("pickle",), context=context)) == d
@pytest.mark.parametrize("protocol", range(4, HIGHEST_PROTOCOL + 1))
def test_pickle_out_of_band(protocol):
context = {"pickle-protocol": protocol}
mv = memoryview(b"123")
mvh = MemoryviewHolder(mv)
if protocol >= 5:
l = []
d = dumps(mvh, protocol=protocol, buffer_callback=l.append)
mvh2 = loads(d, buffers=l)
assert len(l) == 1
assert isinstance(l[0], pickle.PickleBuffer)
assert memoryview(l[0]) == mv
else:
mvh2 = loads(dumps(mvh, protocol=protocol))
assert isinstance(mvh2, MemoryviewHolder)
assert isinstance(mvh2.mv, memoryview)
assert mvh2.mv == mv
h, f = serialize(mvh, serializers=("pickle",), context=context)
mvh3 = deserialize(h, f)
assert isinstance(mvh3, MemoryviewHolder)
assert isinstance(mvh3.mv, memoryview)
assert mvh3.mv == mv
if protocol >= 5:
assert len(f) == 2
assert isinstance(f[0], bytes)
assert isinstance(f[1], memoryview)
assert f[1] == mv
else:
assert len(f) == 1
assert isinstance(f[0], bytes)
@pytest.mark.parametrize("protocol", range(4, HIGHEST_PROTOCOL + 1))
def test_pickle_empty(protocol):
context = {"pickle-protocol": protocol}
x = MemoryviewHolder(bytearray()) # Empty view
header, frames = serialize(x, serializers=("pickle",), context=context)
assert header["serializer"] == "pickle"
assert len(frames) >= 1
assert isinstance(frames[0], bytes)
if protocol >= 5:
assert len(frames) == 2
assert len(header["writeable"]) == 1
header["writeable"] = (False,) * len(frames)
else:
assert len(frames) == 1
assert len(header["writeable"]) == 0
y = deserialize(header, frames)
assert isinstance(y, MemoryviewHolder)
assert isinstance(y.mv, memoryview)
assert y.mv == x.mv
assert y.mv.nbytes == 0
assert y.mv.readonly
@pytest.mark.parametrize("protocol", range(4, HIGHEST_PROTOCOL + 1))
def test_pickle_numpy(protocol):
np = pytest.importorskip("numpy")
context = {"pickle-protocol": protocol}
x = np.ones(5)
assert (loads(dumps(x, protocol=protocol)) == x).all()
assert (
deserialize(*serialize(x, serializers=("pickle",), context=context)) == x
).all()
x = np.ones(5000)
assert (loads(dumps(x, protocol=protocol)) == x).all()
assert (
deserialize(*serialize(x, serializers=("pickle",), context=context)) == x
).all()
x = np.array([np.arange(3), np.arange(4, 6)], dtype=object)
x2 = loads(dumps(x, protocol=protocol))
assert x.shape == x2.shape
assert x.dtype == x2.dtype
assert x.strides == x2.strides
for e_x, e_x2 in zip(x.flat, x2.flat):
np.testing.assert_equal(e_x, e_x2)
h, f = serialize(x, serializers=("pickle",), context=context)
if protocol >= 5:
assert len(f) == 3
else:
assert len(f) == 1
x3 = deserialize(h, f)
assert x.shape == x3.shape
assert x.dtype == x3.dtype
assert x.strides == x3.strides
for e_x, e_x3 in zip(x.flat, x3.flat):
np.testing.assert_equal(e_x, e_x3)
if protocol >= 5:
x = np.ones(5000)
l = []
d = dumps(x, protocol=protocol, buffer_callback=l.append)
assert len(l) == 1
assert isinstance(l[0], pickle.PickleBuffer)
assert memoryview(l[0]) == memoryview(x)
assert (loads(d, buffers=l) == x).all()
h, f = serialize(x, serializers=("pickle",), context=context)
assert len(f) == 2
assert isinstance(f[0], bytes)
assert isinstance(f[1], memoryview)
assert (deserialize(h, f) == x).all()
@pytest.mark.parametrize("protocol", range(4, HIGHEST_PROTOCOL + 1))
def test_pickle_functions(protocol):
context = {"pickle-protocol": protocol}
def make_closure():
value = 1
def f(x): # closure
return x + value
return f
def funcs():
yield make_closure()
yield (lambda x: x + 1)
yield partial(add, 1)
for func in funcs():
wr = weakref.ref(func)
func2 = loads(dumps(func, protocol=protocol))
wr2 = weakref.ref(func2)
assert func2(1) == func(1)
func3 = deserialize(*serialize(func, serializers=("pickle",), context=context))
wr3 = weakref.ref(func3)
assert func3(1) == func(1)
del func, func2, func3
with profile.lock:
assert wr() is None
assert wr2() is None
assert wr3() is None
def test_pickle_by_value_when_registered():
with save_sys_modules():
with tmpdir() as d:
try:
sys.path.insert(0, d)
module = f"{d}/mymodule.py"
with open(module, "w") as f:
f.write("def myfunc(x):\n return x + 1")
import mymodule
assert dumps(mymodule.myfunc) == pickle.dumps(
mymodule.myfunc, protocol=HIGHEST_PROTOCOL
)
cloudpickle.register_pickle_by_value(mymodule)
assert len(dumps(mymodule.myfunc)) > len(pickle.dumps(mymodule.myfunc))
finally:
sys.path.pop(0)
| MemoryviewHolder |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 328,
"end": 423
} | class ____(ASTNode):
module: str
name: str
alias: Optional[str]
@dataclass
| ImportFrom |
python | getsentry__sentry | tests/sentry/integrations/slack/test_integration.py | {
"start": 1834,
"end": 9980
} | class ____(IntegrationTestCase):
provider = SlackIntegrationProvider
def setUp(self) -> None:
super().setUp()
def assert_setup_flow(
self,
team_id="TXXXXXXX1",
authorizing_user_id="UXXXXXXX1",
expected_client_id="slack-client-id",
expected_client_secret="slack-client-secret",
customer_domain=None,
):
responses.reset()
kwargs = {}
if customer_domain:
kwargs["HTTP_HOST"] = customer_domain
resp = self.client.get(self.init_path, **kwargs)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "slack.com"
assert redirect.path == "/oauth/v2/authorize"
params = parse_qs(redirect.query)
scopes = self.provider.identity_oauth_scopes
assert params["scope"] == [" ".join(scopes)]
assert params["state"]
assert params["redirect_uri"] == ["http://testserver/extensions/slack/setup/"]
assert params["response_type"] == ["code"]
assert params["client_id"] == [expected_client_id]
assert params.get("user_scope") == [" ".join(self.provider.user_scopes)]
# once we've asserted on it, switch to a singular values to make life
# easier
authorize_params = {k: v[0] for k, v in params.items()}
access_json = {
"ok": True,
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"team": {"id": team_id, "name": "Example"},
"authed_user": {"id": authorizing_user_id},
}
responses.add(responses.POST, "https://slack.com/api/oauth.v2.access", json=access_json)
response_json = {
"ok": True,
"members": [
{
"id": authorizing_user_id,
"team_id": team_id,
"deleted": False,
"profile": {
"email": self.user.email,
"team": team_id,
},
},
],
"response_metadata": {"next_cursor": ""},
}
with patch(
"slack_sdk.web.client.WebClient.users_list",
return_value=SlackResponse(
client=None,
http_verb="GET",
api_url="https://slack.com/api/users.list",
req_args={},
data=response_json,
headers={},
status_code=200,
),
) as self.mock_post:
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode({"code": "oauth-code", "state": authorize_params["state"]}),
)
)
if customer_domain:
assert resp.status_code == 302
assert resp["Location"].startswith(
f"http://{customer_domain}/extensions/slack/setup/"
)
resp = self.client.get(resp["Location"], **kwargs)
mock_request = responses.calls[0].request
req_params = parse_qs(mock_request.body)
assert req_params["grant_type"] == ["authorization_code"]
assert req_params["code"] == ["oauth-code"]
assert req_params["redirect_uri"] == ["http://testserver/extensions/slack/setup/"]
assert req_params["client_id"] == [expected_client_id]
assert req_params["client_secret"] == [expected_client_secret]
assert resp.status_code == 200
self.assertDialogSuccess(resp)
@responses.activate
def test_bot_flow(self, mock_api_call: MagicMock) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == "TXXXXXXX1"
assert integration.name == "Example"
assert integration.metadata == {
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"scopes": sorted(self.provider.identity_oauth_scopes),
"icon": "http://example.com/ws_icon.jpg",
"domain_name": "test-slack-workspace.slack.com",
"installation_type": "born_as_bot",
}
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
assert oi.config == {}
idp = IdentityProvider.objects.get(type="slack", external_id="TXXXXXXX1")
identity = Identity.objects.get(idp=idp, user=self.user, external_id="UXXXXXXX1")
assert identity.status == IdentityStatus.VALID
audit_entry = AuditLogEntry.objects.get(event=audit_log.get_event_id("INTEGRATION_ADD"))
audit_log_event = audit_log.get(audit_entry.event)
assert audit_log_event.render(audit_entry) == "installed Example for the slack integration"
@responses.activate
def test_bot_flow_customer_domains(self, mock_api_call: MagicMock) -> None:
with self.tasks():
self.assert_setup_flow(customer_domain=f"{self.organization.slug}.testserver")
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == "TXXXXXXX1"
assert integration.name == "Example"
assert integration.metadata == {
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"scopes": sorted(self.provider.identity_oauth_scopes),
"icon": "http://example.com/ws_icon.jpg",
"domain_name": "test-slack-workspace.slack.com",
"installation_type": "born_as_bot",
}
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
assert oi.config == {}
idp = IdentityProvider.objects.get(type="slack", external_id="TXXXXXXX1")
identity = Identity.objects.get(idp=idp, user=self.user, external_id="UXXXXXXX1")
assert identity.status == IdentityStatus.VALID
audit_entry = AuditLogEntry.objects.get(event=audit_log.get_event_id("INTEGRATION_ADD"))
audit_log_event = audit_log.get(audit_entry.event)
assert audit_log_event.render(audit_entry) == "installed Example for the slack integration"
@responses.activate
def test_multiple_integrations(self, mock_api_call: MagicMock) -> None:
with self.tasks():
self.assert_setup_flow()
with self.tasks():
self.assert_setup_flow(team_id="TXXXXXXX2", authorizing_user_id="UXXXXXXX2")
integrations = Integration.objects.filter(provider=self.provider.key).order_by(
"external_id"
)
assert integrations.count() == 2
assert integrations[0].external_id == "TXXXXXXX1"
assert integrations[1].external_id == "TXXXXXXX2"
oi = OrganizationIntegration.objects.get(
integration=integrations[1], organization_id=self.organization.id
)
assert oi.config == {}
idps = IdentityProvider.objects.filter(type="slack")
assert idps.count() == 2
identities = Identity.objects.all()
assert identities.count() == 2
assert identities[0].external_id != identities[1].external_id
assert identities[0].idp != identities[1].idp
@responses.activate
def test_reassign_user(self, mock_api_call: MagicMock) -> None:
"""Test that when you install and then later re-install and the user who installs it
has a different external ID, their Identity is updated to reflect that
"""
with self.tasks():
self.assert_setup_flow()
identity = Identity.objects.get()
assert identity.external_id == "UXXXXXXX1"
with self.tasks():
self.assert_setup_flow(authorizing_user_id="UXXXXXXX2")
identity = Identity.objects.get()
assert identity.external_id == "UXXXXXXX2"
@patch("slack_sdk.web.client.WebClient._perform_urllib_http_request")
@control_silo_test
| SlackIntegrationTest |
python | python__mypy | mypy/types.py | {
"start": 20486,
"end": 21960
} | class ____(ProperType):
__slots__ = ("name", "fullname", "id", "upper_bound", "default")
name: str # Name (may be qualified)
fullname: str # Fully qualified name
id: TypeVarId
upper_bound: Type
default: Type
def __init__(
self,
name: str,
fullname: str,
id: TypeVarId,
upper_bound: Type,
default: Type,
line: int = -1,
column: int = -1,
) -> None:
super().__init__(line, column)
self.name = name
self.fullname = fullname
self.id = id
self.upper_bound = upper_bound
self.default = default
def serialize(self) -> JsonDict:
raise NotImplementedError
@classmethod
def deserialize(cls, data: JsonDict) -> TypeVarLikeType:
raise NotImplementedError
def copy_modified(self, *, id: TypeVarId, **kwargs: Any) -> Self:
raise NotImplementedError
@classmethod
def new_unification_variable(cls, old: Self) -> Self:
new_id = TypeVarId.new(meta_level=1)
return old.copy_modified(id=new_id)
def has_default(self) -> bool:
t = get_proper_type(self.default)
return not (isinstance(t, AnyType) and t.type_of_any == TypeOfAny.from_omitted_generics)
def values_or_bound(self) -> ProperType:
if isinstance(self, TypeVarType) and self.values:
return UnionType(self.values)
return get_proper_type(self.upper_bound)
| TypeVarLikeType |
python | davidhalter__parso | parso/python/tree.py | {
"start": 8979,
"end": 9084
} | class ____(_LeafWithoutNewlines, _StringComparisonMixin):
type = 'operator'
__slots__ = ()
| Operator |
python | keras-team__keras | keras/src/saving/serialization_lib_test.py | {
"start": 15499,
"end": 16540
} | class ____(keras.layers.Layer):
def __init__(
self,
units,
*,
kernel_regularizer=None,
kernel_initializer=None,
**kwargs,
):
super().__init__(**kwargs)
self._units = units
self._kernel_regularizer = kernel_regularizer
self._kernel_initializer = kernel_initializer
def get_config(self):
return dict(
units=self._units,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
**super().get_config(),
)
def build(self, input_shape):
_, input_units = input_shape
self._kernel = self.add_weight(
name="kernel",
shape=[input_units, self._units],
dtype="float32",
regularizer=self._kernel_regularizer,
initializer=self._kernel_initializer,
)
def call(self, inputs):
return ops.matmul(inputs, self._kernel)
@keras.saving.register_keras_serializable()
| MyDense |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 2553,
"end": 3848
} | class ____:
def test_moduleOperators(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "operators.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
mod = mod[0]
assert "body" in mod and len(mod["body"]) == 9
assert mod["body"][1]["name"] == "operator(.item.)"
assert "implementedby" in mod["body"][1]
assert mod["body"][1]["implementedby"] == \
["item_int", "item_real"]
assert mod["body"][2]["name"] == "operator(==)"
assert "implementedby" in mod["body"][2]
assert mod["body"][2]["implementedby"] == ["items_are_equal"]
assert mod["body"][3]["name"] == "assignment(=)"
assert "implementedby" in mod["body"][3]
assert mod["body"][3]["implementedby"] == \
["get_int", "get_real"]
def test_notPublicPrivate(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
mod = mod[0]
assert mod['vars']['a']['attrspec'] == ['private', ]
assert mod['vars']['b']['attrspec'] == ['public', ]
assert mod['vars']['seta']['attrspec'] == ['public', ]
| TestModuleProcedure |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 7495,
"end": 7622
} | class ____(Web3Exception):
"""
Raised when a persistent connection encounters an error.
"""
| PersistentConnectionError |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/markers.py | {
"start": 1009,
"end": 5790
} | class ____(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
# PEP 685 – Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
| UndefinedEnvironmentName |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/_typed_dict_helper.py | {
"start": 745,
"end": 859
} | class ____(TypedDict, total=False):
a: Annotated[Annotated[Annotated[Required[int], "a"], "b"], "c"]
| VeryAnnotated |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_data.py | {
"start": 14200,
"end": 59900
} | class ____:
"""Data set from a debug-dump directory on filesystem.
An instance of `DebugDumpDir` contains all `DebugTensorDatum` instances
in a tfdbg dump root directory.
"""
def __init__(self, dump_root, partition_graphs=None, validate=True):
"""`DebugDumpDir` constructor.
Args:
dump_root: (`str`) path to the dump root directory.
partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime.
validate: (`bool`) whether the dump files are to be validated against the
partition graphs.
Raises:
IOError: If dump_root does not exist as a directory.
ValueError: If more than one core metadata file is found under the dump
root directory.
"""
if not gfile.IsDirectory(dump_root):
raise IOError("Dump root directory %s does not exist" % dump_root)
self._core_metadata = []
# Find the list of devices.
self._dump_root = dump_root
self._load_core_metadata()
self._load_fetches_info()
self._load_feeds_info()
self._load_all_device_dumps(partition_graphs, validate)
self._python_graph = None
def _load_all_device_dumps(self, partition_graphs, validate):
"""Load the dump data for all devices."""
device_dirs = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + DEVICE_TAG + "*"))
self._device_names = []
self._t0s = {}
self._dump_tensor_data = {}
self._dump_graph_file_paths = {}
self._debug_watches = {}
self._watch_key_to_devices = {}
self._watch_key_to_datum = {}
self._watch_key_to_rel_time = {}
self._watch_key_to_dump_size_bytes = {}
for device_dir in device_dirs:
device_name = device_path_to_device_name(device_dir)
self._device_names.append(device_name)
self._load_device_dumps(device_name, device_dir)
self._load_partition_graphs(partition_graphs, validate)
self._calculate_t0()
for device_name in self._device_names:
self._create_tensor_watch_maps(device_name)
def _load_device_dumps(self, device_name, device_root):
"""Load `DebugTensorDatum` instances from the dump root of a given device.
Populates a map {device_name: a list of `DebugTensorDatum`}, where the list
is sorted by ascending timestamp.
This sorting order reflects the order in which the TensorFlow executor
processed the nodes of the graph. It is (one of many possible) topological
sort of the nodes. This is useful for displaying tensors in the debugger
frontend as well as for the use case in which the user wants to find a
"culprit tensor", i.e., the first tensor in the graph that exhibits certain
problematic properties, i.e., all zero values, or bad numerical values such
as nan and inf.
In addition, creates a map from node name to debug watches. In this Map,
the key is the watched node name; the value is a dictionary.
Of this dictionary, the key is the watched_output_slot.
This method attempts to load the debug watches from the tensor dump files
first, before loading the full set of debug watches from the partition
graphs as done later. This is necessary because sometimes the partition
graphs may not be available, e.g., when the run errors out.
Args:
device_name: (`str`) name of the device.
device_root: (`str`) dump root directory of the given device.
Raises:
ValueError: If GraphDef for the device is not available.
"""
self._dump_tensor_data[device_name] = []
self._debug_watches[device_name] = collections.defaultdict(
lambda: collections.defaultdict(set))
for root, _, files in gfile.Walk(device_root):
for f in files:
if _is_graph_file(f):
self._dump_graph_file_paths[device_name] = os.path.join(root, f)
else:
datum = self._dump_file_name_to_datum(root, f)
self._dump_tensor_data[device_name].append(datum)
self._debug_watches[device_name][datum.node_name][
datum.output_slot].add(datum.debug_op)
self._dump_tensor_data[device_name] = sorted(
self._dump_tensor_data[device_name],
key=lambda x: x.extended_timestamp)
if self._dump_tensor_data[device_name]:
self._t0s[device_name] = self._dump_tensor_data[device_name][0].timestamp
else:
self._t0s[device_name] = None
def _calculate_t0(self):
"""Calculate the first timestamp across all devices."""
t0s = [t0 for t0 in self._t0s.values() if t0 is not None]
self._t0 = min(t0s) if t0s else None
def _load_core_metadata(self):
core_metadata_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + CORE_METADATA_TAG + "*"))
for core_metadata_file in core_metadata_files:
with gfile.Open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
self._core_metadata.append(
extract_core_metadata_from_event_proto(event))
def _load_fetches_info(self):
fetches_info_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + FETCHES_INFO_FILE_TAG + "*"))
self._run_fetches_info = []
for fetches_info_file in fetches_info_files:
self._run_fetches_info.append(
_load_log_message_from_event_file(fetches_info_file))
def _load_feeds_info(self):
feeds_info_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + FEED_KEYS_INFO_FILE_TAG + "*"))
self._run_feed_keys_info = []
for feeds_info_file in feeds_info_files:
self._run_feed_keys_info.append(
_load_log_message_from_event_file(feeds_info_file))
def _dump_file_name_to_datum(self, dir_name, file_name):
"""Obtain a DebugTensorDatum from the directory and file name.
Args:
dir_name: (`str`) Name of the directory in which the dump file resides.
file_name: (`str`) Base name of the dump file.
Returns:
(`DebugTensorDatum`) The `DebugTensorDatum` loaded from the dump file.
"""
# Calculate the relative path of the dump file with respect to the root.
debug_dump_rel_path = os.path.join(
os.path.relpath(dir_name, self._dump_root), file_name)
return DebugTensorDatum(self._dump_root, debug_dump_rel_path)
def _create_tensor_watch_maps(self, device_name):
"""Create maps from tensor watch keys to datum and to timestamps.
Create a map from watch key (tensor name + debug op) to `DebugTensorDatum`
item. Also make a map from watch key to relative timestamp.
"relative" means (absolute timestamp - t0).
Args:
device_name: (str) name of the device.
"""
self._watch_key_to_datum[device_name] = {}
self._watch_key_to_rel_time[device_name] = {}
self._watch_key_to_dump_size_bytes[device_name] = {}
for datum in self._dump_tensor_data[device_name]:
if datum.watch_key not in self._watch_key_to_devices:
self._watch_key_to_devices[datum.watch_key] = {device_name}
else:
self._watch_key_to_devices[datum.watch_key].add(device_name)
if datum.watch_key not in self._watch_key_to_datum[device_name]:
self._watch_key_to_datum[device_name][datum.watch_key] = [datum]
self._watch_key_to_rel_time[device_name][datum.watch_key] = [
datum.timestamp - self._t0]
self._watch_key_to_dump_size_bytes[device_name][datum.watch_key] = [
datum.dump_size_bytes]
else:
self._watch_key_to_datum[device_name][datum.watch_key].append(datum)
self._watch_key_to_rel_time[device_name][datum.watch_key].append(
datum.timestamp - self._t0)
self._watch_key_to_dump_size_bytes[device_name][datum.watch_key].append(
datum.dump_size_bytes)
def set_python_graph(self, python_graph):
"""Provide Python `Graph` object to the wrapper.
Unlike the partition graphs, which are protobuf `GraphDef` objects, `Graph`
is a Python object and carries additional information such as the traceback
of the construction of the nodes in the graph.
Args:
python_graph: (ops.Graph) The Python Graph object.
"""
self._python_graph = python_graph
self._node_traceback = {}
if self._python_graph:
for op in self._python_graph.get_operations():
self._node_traceback[op.name] = tuple(map(tuple, op.traceback))
@property
def python_graph(self):
"""Get the Python graph.
Returns:
If the Python graph has been set, returns a `tf.Graph` object. Otherwise,
returns None.
"""
return self._python_graph
@property
def core_metadata(self):
"""Metadata about the `Session.run()` call from the core runtime.
Of the three counters available in the return value, `global_step` is
supplied by the caller of the debugged `Session.run()`, while
`session_run_index` and `executor_step_index` are determined by the state
of the core runtime, automatically. For the same fetch list, feed keys and
debug tensor watch options, the same executor will be used and
`executor_step_index` should increase by one at a time. However, runs with
different fetch lists, feed keys and debug_tensor watch options that all
share the same `Session` object can lead to gaps in `session_run_index`.
Returns:
If core metadata are loaded, a `namedtuple` with the fields:
`global_step`: A global step count supplied by the caller of
`Session.run()`. It is optional to the caller. If the caller did not
supply this parameter, its value will be -1.
`session_run_index`: A sorted index for Run() calls to the underlying
TensorFlow `Session` object.
`executor_step_index`: A counter for invocations of a given runtime
executor. The same executor is re-used for the same fetched tensors,
target nodes, input feed keys and debug tensor watch options.
`input_names`: Names of the input (feed) Tensors.
`output_names`: Names of the output (fetched) Tensors.
`target_nodes`: Names of the target nodes.
If the core metadata have not been loaded, `None`.
If more than one core metadata files exist, return a list of the
`nametuple` described above.
"""
output = self._core_metadata
return output[0] if len(output) == 1 else output
@property
def dumped_tensor_data(self):
"""Retrieve dumped tensor data."""
if len(self.devices()) == 1:
return self._dump_tensor_data[self.devices()[0]]
else:
all_devices_data = self._dump_tensor_data.values()
data = []
for device_data in all_devices_data:
data.extend(device_data)
return sorted(data, key=lambda x: x.extended_timestamp)
@property
def t0(self):
"""Absolute timestamp of the first dumped tensor across all devices.
Returns:
(`int`) absolute timestamp of the first dumped tensor, in microseconds.
"""
return self._t0
@property
def size(self):
"""Total number of dumped tensors in the dump root directory.
Returns:
(`int`) The total number of dumped tensors in the dump root directory.
"""
return sum(len(self._dump_tensor_data[device_name])
for device_name in self._dump_tensor_data)
def _load_partition_graphs(self, client_partition_graphs, validate):
"""Load and process partition graphs.
Load the graphs; parse the input and control input structure; obtain the
device and op type of each node; remove the Copy and debug ops inserted
by the debugger. The gathered information can be used to validate the
tensor dumps.
Args:
client_partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime, from the Python
client. These partition graphs are used only if partition graphs
cannot be loaded from the dump directory on the file system.
validate: (`bool`) Whether the dump files are to be validated against the
partition graphs.
Raises:
ValueError: If the partition GraphDef of one or more devices fail to be
loaded.
"""
self._debug_graphs = {}
self._node_devices = {}
partition_graphs_and_device_names = []
for device_name in self._device_names:
partition_graph = None
if device_name in self._dump_graph_file_paths:
partition_graph = _load_graph_def_from_event_file(
self._dump_graph_file_paths[device_name])
else:
logging.warn(
"Failed to load partition graphs for device %s from disk. "
"As a fallback, the client graphs will be used. This "
"may cause mismatches in device names." % device_name)
partition_graph = self._find_partition_graph(client_partition_graphs,
device_name)
if partition_graph:
partition_graphs_and_device_names.append((partition_graph,
device_name))
for partition_graph, maybe_device_name in partition_graphs_and_device_names:
# Normalize all node names and their input references directly in the
# GraphDef protobuf before any other processing. b/429335661
for node in partition_graph.node:
node.name = re.sub(r"/+", "/", node.name)
for i, inp in enumerate(node.input):
node.input[i] = re.sub(r"/+", "/", inp)
debug_graph = debug_graphs.DebugGraph(partition_graph,
device_name=maybe_device_name)
self._debug_graphs[debug_graph.device_name] = debug_graph
self._collect_node_devices(debug_graph)
if validate and debug_graph.device_name in self._dump_tensor_data:
self._validate_dump_with_graphs(debug_graph.device_name)
def _find_partition_graph(self, partition_graphs, device_name):
if partition_graphs is None:
return None
else:
for graph_def in partition_graphs:
for node_def in graph_def.node:
if node_def.device == device_name:
return graph_def
return None
def _collect_node_devices(self, debug_graph):
for node_name in debug_graph.node_devices:
if node_name in self._node_devices:
self._node_devices[node_name] = self._node_devices[node_name].union(
debug_graph.node_devices[node_name])
else:
self._node_devices[node_name] = debug_graph.node_devices[node_name]
def _validate_dump_with_graphs(self, device_name):
"""Validate the dumped tensor data against the partition graphs.
Only the watched nodes are validated by this method, because tfdbg allows
clients to watch only a subset of the nodes.
Args:
device_name: (`str`) device name.
Raises:
LookupError: If the partition graphs have not been loaded yet.
ValueError: If dumps contain node names not found in partition graph.
Or if the temporal order of the dump's timestamps violate the
input relations on the partition graphs.
"""
if not self._debug_graphs:
raise LookupError(
"No partition graphs loaded for device %s" % device_name)
debug_graph = self._debug_graphs[device_name]
# Verify that the node names in the dump data are all present in the
# partition graphs.
for datum in self._dump_tensor_data[device_name]:
if datum.node_name not in debug_graph.node_inputs:
raise ValueError("Node name '%s' is not found in partition graphs of "
"device %s." % (datum.node_name, device_name))
pending_inputs = {}
for node in debug_graph.node_inputs:
pending_inputs[node] = []
inputs = debug_graph.node_inputs[node]
for inp in inputs:
inp_node = debug_graphs.get_node_name(inp)
inp_output_slot = debug_graphs.get_output_slot(inp)
# Inputs from Enter and NextIteration nodes are not validated because
# DebugNodeInserter::InsertNodes() in the debugger core skips creating
# control edges from debug ops watching these types of nodes.
if (inp_node in self._debug_watches[device_name] and
inp_output_slot in self._debug_watches[device_name][inp_node] and
debug_graph.node_op_types.get(inp) not in (
"Enter", "NextIteration") and
(inp_node, inp_output_slot) not in pending_inputs[node]):
pending_inputs[node].append((inp_node, inp_output_slot))
for i, datum in enumerate(self._dump_tensor_data[device_name]):
node = datum.node_name
slot = datum.output_slot
# In some cases (e.g., system clocks with insufficient precision),
# the upstream and downstream tensors may have identical timestamps, the
# following check examines this possibility and avoids raising an error if
# that is the case.
if not self._satisfied_at_timestamp(
device_name, pending_inputs[node], datum.timestamp, start_i=i + 1):
raise ValueError("Causality violated in timing relations of debug "
"dumps: %s (%d): "
"these input(s) are not satisfied: %s" %
(node, datum.timestamp, repr(pending_inputs[node])))
recipients = debug_graph.node_recipients[node]
for recipient in recipients:
recipient_pending_inputs = pending_inputs[recipient]
if (node, slot) in recipient_pending_inputs:
if self.node_op_type(recipient) == "Merge":
# If this is a Merge op, we automatically clear the list because
# a Merge node only requires one of its two inputs.
del recipient_pending_inputs[:]
else:
del recipient_pending_inputs[
recipient_pending_inputs.index((node, slot))]
def _satisfied_at_timestamp(self, device_name, pending, timestamp, start_i=0):
"""Determine whether pending inputs are satisfied at given timestamp.
Note: This method mutates the input argument "pending".
Args:
device_name: (str) device name.
pending: A list of 2-tuple (node_name, output_slot): the dependencies to
check.
timestamp: (int) the timestamp in question.
start_i: (int) the index in self._dump_tensor_data to start searching for
the timestamp.
Returns:
(bool) Whether all the dependencies in pending are satisfied at the
timestamp. If pending is empty to begin with, return True.
"""
if not pending:
return True
for datum in self._dump_tensor_data[device_name][start_i:]:
if datum.timestamp > timestamp:
break
if (datum.timestamp == timestamp and
(datum.node_name, datum.output_slot) in pending):
pending.remove((datum.node_name, datum.output_slot))
if not pending:
return True
return not pending
def loaded_partition_graphs(self):
"""Test whether partition graphs have been loaded."""
return bool(self._debug_graphs)
def partition_graphs(self):
"""Get the partition graphs.
Returns:
Partition graphs as a list of GraphDef.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
return [self._debug_graphs[key].debug_graph_def
for key in self._debug_graphs]
def reconstructed_non_debug_partition_graphs(self):
"""Reconstruct partition graphs with the debugger-inserted ops stripped.
The reconstructed partition graphs are identical to the original (i.e.,
non-debugger-decorated) partition graphs except in the following respects:
1) The exact names of the runtime-inserted internal nodes may differ.
These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.
2) As a consequence of 1, the nodes that receive input directly from such
send- and recv-type ops will have different input names.
3) The parallel_iteration attribute of while-loop Enter ops are set to 1.
Returns:
A dict mapping device names (`str`s) to reconstructed
`tf.compat.v1.GraphDef`s.
"""
non_debug_graphs = {}
for key in self._debug_graphs:
non_debug_graphs[key] = self._debug_graphs[key].non_debug_graph_def
return non_debug_graphs
@property
def run_fetches_info(self):
"""Get a str representation of the fetches used in the Session.run() call.
Returns:
If the information is available from one `Session.run` call, a `str`
obtained from `repr(fetches)`.
If the information is available from multiple `Session.run` calls, a
`list` of `str` from `repr(fetches)`.
If the information is not available, `None`.
"""
output = self._run_fetches_info
return output[0] if len(output) == 1 else output
@property
def run_feed_keys_info(self):
"""Get a str representation of the feed_dict used in the Session.run() call.
Returns:
If the information is available from one `Session.run` call, a `str`
obtained from `repr(feed_dict)`.
If the information is available from multiple `Session.run` calls, a
`list` of `str` obtained from `repr(feed_dict)`.
If the information is not available, `None`.
"""
output = self._run_feed_keys_info
return output[0] if len(output) == 1 else output
def _infer_device_name(self, device_name, node_name):
"""Infer the device name given node name.
If device_name is provided (i.e., not None), it'll be simply returned right
away.
Args:
device_name: (str or None) name of the device. If None, will try to infer
the device name by looking at the available nodes.
node_name: (str) name of the node.
Returns:
(str) Inferred name of the device, if available.
Raises:
ValueError: If the node name does not exist on any of the available
devices or if there are multiple devices that contain the node with
the given name.
"""
if device_name is None:
if node_name in self._node_devices:
if len(self._node_devices[node_name]) == 1:
return list(self._node_devices[node_name])[0]
else:
raise ValueError(
"There are multiple (%d) devices with nodes named '%s' but "
"device_name is not specified." %
(len(self._node_devices[node_name]), node_name))
else:
raise ValueError("None of the %d device(s) has a node named '%s'." %
(len(self._device_names), node_name))
else:
return device_name
def nodes(self, device_name=None):
"""Get a list of all nodes from the partition graphs.
Args:
device_name: (`str`) name of device. If None, all nodes from all available
devices will be included.
Returns:
All nodes' names, as a list of str.
Raises:
LookupError: If no partition graphs have been loaded.
ValueError: If specified node name does not exist.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
if device_name is None:
nodes = []
for device_name in self._debug_graphs:
nodes.extend(self._debug_graphs[device_name].node_inputs.keys())
return nodes
else:
if device_name not in self._debug_graphs:
raise ValueError("Invalid device name: %s" % device_name)
return self._debug_graphs[device_name].node_inputs.keys()
def node_attributes(self, node_name, device_name=None):
"""Get the attributes of a node.
Args:
node_name: Name of the node in question.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
Attributes of the node.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
device_name = self._infer_device_name(device_name, node_name)
return self._debug_graphs[device_name].node_attributes[node_name]
def node_inputs(self, node_name, is_control=False, device_name=None):
"""Get the inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
is_control: (`bool`) Whether control inputs, rather than non-control
inputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
if is_control:
return self._debug_graphs[device_name].node_ctrl_inputs[node_name]
else:
return self._debug_graphs[device_name].node_inputs[node_name]
def transitive_inputs(self,
node_name,
include_control=True,
include_reversed_ref=False,
device_name=None,):
"""Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
include_control: Include control inputs (True by default).
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally let the recipient (e.g., B in this case) mutate the value of
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all transitive inputs to the node, as a list of node
names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
input_lists = [self._debug_graphs[device_name].node_inputs]
if include_control:
input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs)
if include_reversed_ref:
input_lists.append(
self._debug_graphs[device_name].node_reversed_ref_inputs)
tracer = debug_graphs.DFSGraphTracer(
input_lists,
skip_node_names=self._get_merge_node_names(device_name))
tracer.trace(node_name)
return tracer.inputs()
def _get_merge_node_names(self, device_name):
"""Lazily get a list of Merge nodes on a given device."""
if device_name not in self._device_names:
raise ValueError("Invalid device name: %s" % device_name)
if not hasattr(self, "_merge_node_names"):
self._merge_node_names = {}
if device_name not in self._merge_node_names:
debug_graph = self._debug_graphs[device_name]
self._merge_node_names[device_name] = [
node for node in debug_graph.node_op_types
if debug_graph.node_op_types[node] == "Merge"]
return self._merge_node_names[device_name]
  def find_some_path(self,
                     src_node_name,
                     dst_node_name,
                     include_control=True,
                     include_reversed_ref=False,
                     device_name=None):
    """Find a path between a source node and a destination node.

    Limitation: the source and destination are required to be on the same
    device, i.e., this method does not yet take into account Send/Recv nodes
    across devices.

    TODO(cais): Make this method work across device edges by tracing Send/Recv
      nodes.

    Args:
      src_node_name: (`str`) name of the source node or name of an output
        tensor of the node.
      dst_node_name: (`str`) name of the destination node or name of an output
        tensor of the node.
      include_control: (`bool`) whether control edges are considered in the
        graph tracing.
      include_reversed_ref: Whether a ref input, say from A to B, is to be also
        considered as an input from B to A. The rationale is that ref inputs
        generally let the recipient (e.g., B in this case) mutate the value of
        the source (e.g., A in this case). So the reverse direction of the ref
        edge reflects the direction of information flow.
      device_name: (`str`) name of the device. If there is only one device or
        if node_name exists on only one device, this argument is optional.

    Returns:
      A path from the src_node_name to dst_node_name, as a `list` of `str`, if
      it exists. The list includes src_node_name as the first item and
      dst_node_name as the last.
      If such a path does not exist, `None` (implicitly, by falling off the
      end of the method when the tracer never reaches the destination).

    Raises:
      ValueError: If the source and destination nodes are not on the same
        device.
    """
    src_device_name = self._infer_device_name(device_name, src_node_name)
    dst_device_name = self._infer_device_name(device_name, dst_node_name)
    if src_device_name != dst_device_name:
      raise ValueError(
          "Source (%s) and destination (%s) are not on the same device: "
          "%s vs. %s" % (src_node_name, dst_node_name, src_device_name,
                         dst_device_name))
    input_lists = [self._debug_graphs[dst_device_name].node_inputs]
    debug_graph = self._debug_graphs[dst_device_name]
    if include_control:
      input_lists.append(debug_graph.node_ctrl_inputs)
    if include_reversed_ref:
      input_lists.append(debug_graph.node_reversed_ref_inputs)
    tracer = debug_graphs.DFSGraphTracer(
        input_lists,
        skip_node_names=self._get_merge_node_names(dst_device_name),
        destination_node_name=src_node_name)
    # Here the value of destination_node_name is src_node_name, because we
    # are tracing the graph from output to its inputs (i.e., going backwards
    # on the graph).
    try:
      tracer.trace(dst_node_name)
    except debug_graphs.GraphTracingReachedDestination:
      # The tracer signals success by raising; reconstruct the path from its
      # visit history. Prune nodes not on the path: walking the visit order
      # backwards, keep only nodes whose depth decreases by exactly one each
      # step, which yields the dst -> src chain in reverse (i.e., the path
      # from src to dst in final order).
      inputs = [dst_node_name] + tracer.inputs()
      depth_list = [0] + tracer.depth_list()
      path = []
      curr_depth = depth_list[-1]
      for inp, depth in zip(reversed(inputs), reversed(depth_list)):
        if depth == curr_depth:
          path.append(inp)
          curr_depth -= 1
      return path
def node_recipients(self, node_name, is_control=False, device_name=None):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node recipients are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
debug_graph = self._debug_graphs[device_name]
if is_control:
return debug_graph.node_ctrl_recipients[node_name]
else:
return debug_graph.node_recipients[node_name]
  def devices(self):
    """Get the list of device names.

    Returns:
      (`list` of `str`) names of the devices. Note that the internal list
      object is returned directly (not a copy); callers should not mutate it.
    """
    return self._device_names
def node_exists(self, node_name, device_name=None):
"""Test if a node exists in the partition graphs.
Args:
node_name: (`str`) name of the node to be checked.
device_name: optional device name. If None, will search for the node
on all available devices. Otherwise, search for the node only on
the given device.
Returns:
A boolean indicating whether the node exists.
Raises:
LookupError: If no partition graphs have been loaded yet.
ValueError: If device_name is specified but cannot be found.
"""
if not self._debug_graphs:
raise LookupError(
"Nodes have not been loaded from partition graphs yet.")
if (device_name is not None) and device_name not in self._debug_graphs:
raise ValueError(
"The specified device_name '%s' cannot be found." % device_name)
for _, debug_graph in self._debug_graphs.items():
if node_name in debug_graph.node_inputs:
return True
return False
def node_device(self, node_name):
"""Get the names of the devices that has nodes of the specified name.
Args:
node_name: (`str`) name of the node.
Returns:
(`str` or `list` of `str`) name of the device(s) on which the node of the
given name is found. Returns a `str` if there is only one such device,
otherwise return a `list` of `str`.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if not self._debug_graphs:
raise LookupError(
"Node devices are not loaded from partition graphs yet.")
if node_name not in self._node_devices:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
output = list(self._node_devices[node_name])
return output[0] if len(output) == 1 else output
def node_op_type(self, node_name, device_name=None):
"""Get the op type of given node.
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`str`) op type of the node.
Raises:
LookupError: If node op types have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node op types are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
return self._debug_graphs[device_name].node_op_types[node_name]
def debug_watch_keys(self, node_name, device_name=None):
"""Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all debug tensor watch keys. Returns an empty list if
the node name does not correspond to any debug watch keys.
Raises:
`LookupError`: If debug watch information has not been loaded from
partition graphs yet.
"""
try:
device_name = self._infer_device_name(device_name, node_name)
except ValueError:
return []
if node_name not in self._debug_watches[device_name]:
return []
watch_keys = []
for watched_slot in self._debug_watches[device_name][node_name]:
debug_ops = self._debug_watches[device_name][node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(
_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
def watch_key_to_data(self, debug_watch_key, device_name=None):
"""Get all `DebugTensorDatum` instances corresponding to a debug watch key.
Args:
debug_watch_key: (`str`) debug watch key.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
A list of `DebugTensorDatum` instances that correspond to the debug watch
key. If the watch key does not exist, returns an empty list.
Raises:
ValueError: If there are multiple devices that have the debug_watch_key,
but device_name is not specified.
"""
if device_name is None:
matching_device_names = [
name for name in self._watch_key_to_datum
if debug_watch_key in self._watch_key_to_datum[name]]
if not matching_device_names:
return []
elif len(matching_device_names) == 1:
device_name = matching_device_names[0]
else:
raise ValueError(
"The debug watch key '%s' exists on multiple (%d) devices, but "
"device name is not specified." %
(debug_watch_key, len(matching_device_names)))
elif device_name not in self._debug_key_to_datum:
raise ValueError(
"There is no device named '%s' consisting of debug watch keys." %
device_name)
return self._watch_key_to_datum[device_name].get(debug_watch_key, [])
def find(self,
predicate,
first_n=0,
device_name=None,
exclude_node_names=None):
"""Find dumped tensor data by a certain predicate.
Args:
predicate: A callable that takes two input arguments:
```python
def predicate(debug_tensor_datum, tensor):
# returns a bool
```
where `debug_tensor_datum` is an instance of `DebugTensorDatum`, which
carries the metadata, such as the `Tensor`'s node name, output slot
timestamp, debug op name, etc.; and `tensor` is the dumped tensor value
as a `numpy.ndarray`.
first_n: (`int`) return only the first n `DebugTensotDatum` instances (in
time order) for which the predicate returns True. To return all the
`DebugTensotDatum` instances, let first_n be <= 0.
device_name: optional device name.
exclude_node_names: Optional regular expression to exclude nodes with
names matching the regular expression.
Returns:
A list of all `DebugTensorDatum` objects in this `DebugDumpDir` object
for which predicate returns True, sorted in ascending order of the
timestamp.
"""
if exclude_node_names:
exclude_node_names = re.compile(exclude_node_names)
matched_data = []
for device in (self._dump_tensor_data if device_name is None
else (self._dump_tensor_data[device_name],)):
for datum in self._dump_tensor_data[device]:
if exclude_node_names and exclude_node_names.match(datum.node_name):
continue
if predicate(datum, datum.get_tensor()):
matched_data.append(datum)
if first_n > 0 and len(matched_data) >= first_n:
return matched_data
return matched_data
def get_tensor_file_paths(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the file paths from a debug-dumped tensor.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
List of file path(s) loaded. This is a list because each debugged tensor
may be dumped multiple times.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in
the debug-dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
return [datum.file_path for datum in
self._watch_key_to_datum[device_name][watch_key]]
def get_tensors(self, node_name, output_slot, debug_op, device_name=None):
"""Get the tensor value from for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
List of tensors (`numpy.ndarray`) loaded from the debug-dump file(s).
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in
the debug-dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
try:
device_name = self._infer_device_name(device_name, node_name)
return [datum.get_tensor() for datum in
self._watch_key_to_datum[device_name][watch_key]]
except (ValueError, KeyError):
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
def get_rel_timestamps(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the relative timestamp from for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - `t0`), where `t0` is the
absolute timestamp of the first dumped tensor in the dump root. The tensor
may be dumped multiple times in the dump root directory, so a list of
relative timestamps (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`) list of relative timestamps.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump" % watch_key)
# TODO(cais): Figure out whether this should be relative to the global t0.
return self._watch_key_to_rel_time[device_name][watch_key]
def get_dump_sizes_bytes(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the sizes of the dump files for a debug-dumped tensor.
Unit of the file size: byte.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`): list of dump file sizes in bytes.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
return self._watch_key_to_dump_size_bytes[device_name][watch_key]
def node_traceback(self, element_name):
"""Try to retrieve the Python traceback of node's construction.
Args:
element_name: (`str`) Name of a graph element (node or tensor).
Returns:
(list) The traceback list object as returned by the `extract_trace`
method of Python's traceback module.
Raises:
LookupError: If Python graph is not available for traceback lookup.
KeyError: If the node cannot be found in the Python graph loaded.
"""
if self._python_graph is None:
raise LookupError("Python graph is not available for traceback lookup")
node_name = debug_graphs.get_node_name(element_name)
if node_name not in self._node_traceback:
raise KeyError("Cannot find node \"%s\" in Python graph" % node_name)
return self._node_traceback[node_name]
| DebugDumpDir |
python | spack__spack | lib/spack/spack/cmd/common/arguments.py | {
"start": 4442,
"end": 5492
} | class ____(argparse.Action):
"""Like the builtin store_true, but prints a deprecation warning."""
def __init__(
self,
option_strings,
dest: str,
default: Optional[Any] = False,
required: bool = False,
help: Optional[str] = None,
removed_in: Optional[str] = None,
instructions: Optional[str] = None,
):
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=True,
required=required,
help=help,
default=default,
)
self.removed_in = removed_in
self.instructions = instructions
def __call__(self, parser, namespace, value, option_string=None):
instructions = [] if not self.instructions else [self.instructions]
tty.warn(
f"{option_string} is deprecated and will be removed in {self.removed_in}.",
*instructions,
)
setattr(namespace, self.dest, self.const)
| DeprecatedStoreTrueAction |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_hash_returned.py | {
"start": 696,
"end": 826
} | class ____:
""" __hash__ returns str """
def __hash__(self): # [invalid-hash-returned]
return "True"
| SecondBadHash |
python | walkccc__LeetCode | solutions/2272. Substring With Largest Variance/2272.py | {
"start": 0,
"end": 989
} | class ____:
def largestVariance(self, s: str) -> int:
# a := the letter with the higher frequency
# b := the letter with the lower frequency
def kadane(a: str, b: str) -> int:
ans = 0
countA = 0
countB = 0
canExtendPrevB = False
for c in s:
if c != a and c != b:
continue
if c == a:
countA += 1
else:
countB += 1
if countB > 0:
# An interval should contain at least one b.
ans = max(ans, countA - countB)
elif countB == 0 and canExtendPrevB:
# edge case: consider the previous b.
ans = max(ans, countA - 1)
# Reset if the number of b > the number of a.
if countB > countA:
countA = 0
countB = 0
canExtendPrevB = True
return ans
return max(kadane(a, b)
for a in string.ascii_lowercase
for b in string.ascii_lowercase
if a != b)
| Solution |
python | langchain-ai__langchain | libs/cli/langchain_cli/utils/git.py | {
"start": 422,
"end": 7522
} | class ____(TypedDict):
"""Dependency source information."""
git: str
ref: str | None
subdirectory: str | None
api_path: str | None
event_metadata: dict[str, Any]
# use poetry dependency string format
def parse_dependency_string(
dep: str | None,
repo: str | None,
branch: str | None,
api_path: str | None,
) -> DependencySource:
"""Parse a dependency string into a `DependencySource`.
Args:
dep: The dependency string
repo: Optional repository
branch: Optional branch
api_path: Optional API path
Returns:
The parsed dependency source information
Raises:
ValueError: If the dependency string is invalid
"""
if dep is not None and dep.startswith("git+"):
if repo is not None or branch is not None:
msg = (
"If a dependency starts with git+, you cannot manually specify "
"a repo or branch."
)
raise ValueError(msg)
# remove git+
gitstring = dep[4:]
subdirectory = None
ref = None
# first check for #subdirectory= on the end
if "#subdirectory=" in gitstring:
gitstring, subdirectory = gitstring.split("#subdirectory=")
if "#" in subdirectory or "@" in subdirectory:
msg = "#subdirectory must be the last part of the dependency string"
raise ValueError(msg)
# find first slash after ://
# find @ or # after that slash
# remainder is ref
# if no @ or #, then ref is None
# find first slash after ://
if "://" not in gitstring:
msg = "git+ dependencies must start with git+https:// or git+ssh://"
raise ValueError(msg)
_, find_slash = gitstring.split("://", 1)
if "/" not in find_slash:
post_slash = find_slash
ref = None
else:
_, post_slash = find_slash.split("/", 1)
if "@" in post_slash or "#" in post_slash:
_, ref = re.split(r"[@#]", post_slash, maxsplit=1)
# gitstring is everything before that
gitstring = gitstring[: -len(ref) - 1] if ref is not None else gitstring
return DependencySource(
git=gitstring,
ref=ref,
subdirectory=subdirectory,
api_path=api_path,
event_metadata={"dependency_string": dep},
)
if dep is not None and dep.startswith("https://"):
msg = "Only git dependencies are supported"
raise ValueError(msg)
# if repo is none, use default, including subdirectory
base_subdir = Path(DEFAULT_GIT_SUBDIRECTORY) if repo is None else Path()
subdir = str(base_subdir / dep) if dep is not None else None
gitstring = (
DEFAULT_GIT_REPO
if repo is None
else f"https://github.com/{repo.strip('/')}.git"
)
ref = DEFAULT_GIT_REF if branch is None else branch
# it's a default git repo dependency
return DependencySource(
git=gitstring,
ref=ref,
subdirectory=subdir,
api_path=api_path,
event_metadata={
"dependency_string": dep,
"used_repo_flag": repo is not None,
"used_branch_flag": branch is not None,
},
)
def _list_arg_to_length(arg: list[str] | None, num: int) -> Sequence[str | None]:
if not arg:
return [None] * num
if len(arg) == 1:
return arg * num
if len(arg) == num:
return arg
msg = f"Argument must be of length 1 or {num}"
raise ValueError(msg)
def parse_dependencies(
dependencies: list[str] | None,
repo: list[str],
branch: list[str],
api_path: list[str],
) -> list[DependencySource]:
"""Parse dependencies.
Args:
dependencies: The dependencies to parse
repo: The repositories to use
branch: the branches to use
api_path: the api paths to use
Returns:
A list of DependencySource objects.
Raises:
ValueError: if the number of `dependencies`, `repos`, `branches`, or `api_paths`
do not match.
"""
num_deps = max(
len(dependencies) if dependencies is not None else 0,
len(repo),
len(branch),
)
if (
(dependencies and len(dependencies) != num_deps)
or (api_path and len(api_path) != num_deps)
or (repo and len(repo) not in {1, num_deps})
or (branch and len(branch) not in {1, num_deps})
):
msg = (
"Number of defined repos/branches/api_paths did not match the "
"number of templates."
)
raise ValueError(msg)
inner_deps = _list_arg_to_length(dependencies, num_deps)
inner_api_paths = _list_arg_to_length(api_path, num_deps)
inner_repos = _list_arg_to_length(repo, num_deps)
inner_branches = _list_arg_to_length(branch, num_deps)
return list(
map( # type: ignore[call-overload, unused-ignore]
parse_dependency_string,
inner_deps,
inner_repos,
inner_branches,
inner_api_paths,
strict=False,
)
)
def _get_repo_path(gitstring: str, ref: str | None, repo_dir: Path) -> Path:
# only based on git for now
ref_str = ref if ref is not None else ""
hashed = hashlib.sha256((f"{gitstring}:{ref_str}").encode()).hexdigest()[:8]
removed_protocol = gitstring.split("://", maxsplit=1)[-1]
removed_basename = re.split(r"[/:]", removed_protocol, maxsplit=1)[-1]
removed_extras = removed_basename.split("#")[0]
foldername = re.sub(r"\W", "_", removed_extras)
directory_name = f"{foldername}_{hashed}"
return repo_dir / directory_name
def update_repo(gitstring: str, ref: str | None, repo_dir: Path) -> Path:
"""Update a git repository to the specified ref.
Tries to pull if the repo already exists, otherwise clones it.
Args:
gitstring: The git repository URL.
ref: The git reference.
repo_dir: The directory to clone the repository into.
Returns:
The path to the cloned repository.
"""
# see if path already saved
repo_path = _get_repo_path(gitstring, ref, repo_dir)
if repo_path.exists():
# try pulling
try:
repo = Repo(repo_path)
if repo.active_branch.name == ref:
repo.remotes.origin.pull()
return repo_path
except Exception:
logger.exception("Failed to pull existing repo")
# if it fails, delete and clone again
shutil.rmtree(repo_path)
Repo.clone_from(gitstring, repo_path, branch=ref, depth=1)
return repo_path
def copy_repo(
source: Path,
destination: Path,
) -> None:
"""Copiy a repo, ignoring git folders.
Raises `FileNotFound` if it can't find source
"""
def ignore_func(_: str, files: list[str]) -> list[str]:
return [f for f in files if f == ".git"]
shutil.copytree(source, destination, ignore=ignore_func)
| DependencySource |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 10614,
"end": 11081
} | class ____:
test_cbc = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "SEED"),
["rfc-4196.txt"],
lambda key, **kwargs: SEED(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
SEED(b"\x00" * 16), OFB(b"\x00" * 16)
),
skip_message="Does not support SEED OFB",
)
| TestSEEDModeCBC |
python | bokeh__bokeh | tests/unit/bokeh/server/test_auth_provider.py | {
"start": 7673,
"end": 9593
} | class ____:
def test_no_file(self) -> None:
with pytest.raises(ValueError) as e:
bsa.AuthModule("junkjunkjunk")
assert str(e).startswith("no file exists at module_path:")
def test_both_user(self) -> None:
def func(filename: str):
with pytest.raises(ValueError) as e:
bsa.AuthModule(filename)
assert str(e) == "Only one of get_user or get_user_async should be supplied"
with_file_contents("""
def get_user(handler): return 10
async def get_user_async(handler): return 20
""", func, suffix='.py')
@pytest.mark.parametrize('user_func', ['get_user', 'get_user_async'])
def test_no_login(self, user_func: str) -> None:
def func(filename: str):
with pytest.raises(ValueError) as e:
bsa.AuthModule(filename)
assert str(e) == "When user authentication is enabled, one of login_url or get_login_url must be supplied"
with_file_contents(f"""
def {user_func}(handler): return 10
""", func, suffix='.py')
def test_both_login(self) -> None:
def func(filename: str):
with pytest.raises(ValueError) as e:
bsa.AuthModule(filename)
assert str(e) == "At most one of login_url or get_login_url should be supplied"
with_file_contents("""
def get_user(handler): return 10
def get_login_url(handler): return 20
login_url = "/foo"
""", func, suffix='.py')
def test_handler_with_get_login_url(self) -> None:
def func(filename: str):
with pytest.raises(ValueError) as e:
bsa.AuthModule(filename)
assert str(e) == "LoginHandler cannot be used with a get_login_url() function"
with_file_contents("""
def get_user(handler): return 10
def get_login_url(handler): return 20
from tornado.web import RequestHandler
| TestAuthModule_validation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit1.py | {
"start": 531,
"end": 656
} | class ____(Protocol1): ...
# This should generate an error because some attributes are not implemented.
Concrete1()
| Concrete1 |
python | pyinstaller__pyinstaller | bootloader/waflib/Utils.py | {
"start": 1497,
"end": 2156
} | class ____(dict):
def __init__(self, *k, **kw):
self.lst = deque()
dict.__init__(self, *k, **kw)
def clear(self):
dict.clear(self)
self.lst = deque()
def __setitem__(self, key, value):
if key in dict.keys(self):
self.lst.remove(key)
dict.__setitem__(self, key, value)
self.lst.append(key)
def __delitem__(self, key):
dict.__delitem__(self, key)
try:
self.lst.remove(key)
except ValueError:
pass
def __iter__(self):
return reversed(self.lst)
def keys(self):
return reversed(self.lst)
| ordered_iter_dict |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/concurrency.py | {
"start": 253,
"end": 346
} | class ____(Enum):
BLOCKED = "BLOCKED"
CLAIMED = "CLAIMED"
@record
| ConcurrencySlotStatus |
python | walkccc__LeetCode | solutions/2907. Maximum Profitable Triplets With Increasing Prices I/2907.py | {
"start": 446,
"end": 1084
} | class ____:
def maxProfit(self, prices: list[int], profits: list[int]) -> int:
ans = -1
maxPrice = max(prices)
maxProfitTree1 = FenwickTree(maxPrice)
maxProfitTree2 = FenwickTree(maxPrice)
for price, profit in zip(prices, profits):
# max(proftis[i])
maxProfit1 = maxProfitTree1.get(price - 1)
# max(proftis[i]) + max(profits[j])
maxProfit2 = maxProfitTree2.get(price - 1)
maxProfitTree1.maximize(price, profit)
if maxProfit1 > 0:
maxProfitTree2.maximize(price, profit + maxProfit1)
if maxProfit2 > 0:
ans = max(ans, profit + maxProfit2)
return ans
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/hitl.py | {
"start": 1907,
"end": 2973
} | class ____(BaseModel):
"""Schema for the response part of a Human-in-the-loop detail for a specific task instance."""
response_received: bool
responded_by_user: HITLUser | None = None
responded_at: UtcDateTime | None
# It's empty if the user has not yet responded.
chosen_options: Iterable[str] | None
params_input: dict[str, Any] = Field(default_factory=dict)
@classmethod
def from_hitl_detail_orm(cls, hitl_detail: HITLDetail) -> HITLDetailResponse:
hitl_user = (
HITLUser(
id=hitl_detail.responded_by_user_id,
name=hitl_detail.responded_by_user_name,
)
if hitl_detail.responded_by_user
else None
)
return HITLDetailResponse(
response_received=hitl_detail.response_received,
responded_at=hitl_detail.responded_at,
responded_by_user=hitl_user,
chosen_options=hitl_detail.chosen_options or (),
params_input=hitl_detail.params_input or {},
)
| HITLDetailResponse |
python | pytorch__pytorch | torch/_inductor/cpp_builder.py | {
"start": 21276,
"end": 34077
} | class ____:
"""
This is the Base class for store cxx build options, as a template.
Actually, to build a cxx shared library. We just need to select a compiler
and maintains the suitable args.
"""
def __init__(
self,
compiler: str = "",
definitions: Optional[list[str]] = None,
include_dirs: Optional[list[str]] = None,
cflags: Optional[list[str]] = None,
ldflags: Optional[list[str]] = None,
libraries_dirs: Optional[list[str]] = None,
libraries: Optional[list[str]] = None,
passthrough_args: Optional[list[str]] = None,
aot_mode: bool = False,
use_relative_path: bool = False,
compile_only: bool = False,
precompiling: bool = False,
preprocessing: bool = False,
) -> None:
self._compiler = compiler
self._definitions: list[str] = definitions or []
self._include_dirs: list[str] = include_dirs or []
self._cflags: list[str] = cflags or []
self._ldflags: list[str] = ldflags or []
self._libraries_dirs: list[str] = libraries_dirs or []
self._libraries: list[str] = libraries or []
# Some args are hard to abstract to OS compatible, passthrough directly.
self._passthrough_args: list[str] = passthrough_args or []
# Optionally, the path to a precompiled header which should be included on the
# build command line.
self.precompiled_header: Optional[str] = None
self._aot_mode: bool = aot_mode
self._use_relative_path: bool = use_relative_path
self._compile_only: bool = compile_only
self._precompiling: bool = precompiling
self._preprocessing: bool = preprocessing
def _process_compile_only_options(self) -> None:
if self._compile_only:
self._libraries_dirs = []
self._libraries = []
def _remove_duplicate_options(self) -> None:
self._definitions = _remove_duplication_in_list(self._definitions)
self._include_dirs = _remove_duplication_in_list(self._include_dirs)
self._cflags = _remove_duplication_in_list(self._cflags)
self._ldflags = _remove_duplication_in_list(self._ldflags)
self._libraries_dirs = _remove_duplication_in_list(self._libraries_dirs)
self._libraries = _remove_duplication_in_list(self._libraries)
self._passthrough_args = _remove_duplication_in_list(self._passthrough_args)
def _finalize_options(self) -> None:
self._process_compile_only_options()
self._remove_duplicate_options()
def get_compiler(self) -> str:
return self._compiler
def get_definitions(self) -> list[str]:
return self._definitions
def get_include_dirs(self) -> list[str]:
return self._include_dirs
def get_cflags(self) -> list[str]:
return self._cflags
def get_ldflags(self) -> list[str]:
return self._ldflags
def get_libraries_dirs(self) -> list[str]:
return self._libraries_dirs
def get_libraries(self) -> list[str]:
return self._libraries
def get_passthrough_args(self) -> list[str]:
return self._passthrough_args
def get_aot_mode(self) -> bool:
return self._aot_mode
def get_use_relative_path(self) -> bool:
return self._use_relative_path
def get_compile_only(self) -> bool:
return self._compile_only
def get_precompiling(self) -> bool:
return self._precompiling
def get_preprocessing(self) -> bool:
return self._preprocessing
def save_flags_to_json(self, file: str) -> None:
attrs = {
"compiler": self.get_compiler(),
"definitions": self.get_definitions(),
"include_dirs": self.get_include_dirs(),
"cflags": self.get_cflags(),
"ldflags": self.get_ldflags(),
"libraries_dirs": self.get_libraries_dirs(),
"libraries": self.get_libraries(),
"passthrough_args": self.get_passthrough_args(),
"aot_mode": self.get_aot_mode(),
"use_relative_path": self.get_use_relative_path(),
"compile_only": self.get_compile_only(),
}
with open(file, "w") as f:
json.dump(attrs, f)
def _get_warning_all_cflag(warning_all: bool = True) -> list[str]:
if not _IS_WINDOWS:
return ["Wall"] if warning_all else []
else:
return []
def _get_cpp_std_cflag(std_num: str = "c++17") -> list[str]:
if _IS_WINDOWS:
"""
On Windows, only c++20 can support `std::enable_if_t`.
Ref: https://learn.microsoft.com/en-us/cpp/overview/cpp-conformance-improvements-2019?view=msvc-170#checking-for-abstract-class-types # noqa: B950
Note:
Only setup c++20 for Windows inductor. I tried to upgrade all project to c++20, but it is failed:
https://github.com/pytorch/pytorch/pull/131504
"""
std_num = "c++20"
return [f"std:{std_num}"]
else:
return [f"std={std_num}"]
def _get_os_related_cpp_cflags(cpp_compiler: str) -> list[str]:
if _IS_WINDOWS:
cflags = [
"wd4819",
"wd4251",
"wd4244",
"wd4267",
"wd4275",
"wd4018",
"wd4190",
"wd4624",
"wd4067",
"wd4068",
"EHsc",
# For Intel oneAPI, ref: https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus?view=msvc-170
"Zc:__cplusplus",
# Enable max compatible to msvc for oneAPI headers.
# ref: https://github.com/pytorch/pytorch/blob/db38c44ad639e7ada3e9df2ba026a2cb5e40feb0/cmake/public/utils.cmake#L352-L358 # noqa: B950
"permissive-",
]
else:
cflags = ["Wno-unused-variable", "Wno-unknown-pragmas"]
if _is_clang(cpp_compiler):
ignored_optimization_argument = (
"Werror=ignored-optimization-argument"
if config.aot_inductor.raise_error_on_ignored_optimization
else "Wno-ignored-optimization-argument"
)
cflags.append(ignored_optimization_argument)
if _is_gcc(cpp_compiler):
# Issue all the warnings demanded by strict ISO C and ISO C++.
# Ref: https://github.com/pytorch/pytorch/issues/153180#issuecomment-2986676878
cflags.append("pedantic")
return cflags
def _get_os_related_cpp_definitions(cpp_compiler: str) -> list[str]:
os_definitions: list[str] = []
if _IS_WINDOWS:
# On Windows, we need disable min/max macro to avoid C2589 error, as PyTorch CMake:
# https://github.com/pytorch/pytorch/blob/9a41570199155eee92ebd28452a556075e34e1b4/CMakeLists.txt#L1118-L1119
os_definitions.append("NOMINMAX")
return os_definitions
def _get_ffast_math_flags() -> list[str]:
if _IS_WINDOWS:
flags = []
else:
# ffast-math is equivalent to these flags as in
# https://github.com/gcc-mirror/gcc/blob/4700ad1c78ccd7767f846802fca148b2ea9a1852/gcc/opts.cc#L3458-L3468
# however gcc<13 sets the FTZ/DAZ flags for runtime on x86 even if we have
# -ffast-math -fno-unsafe-math-optimizations because the flags for runtime
# are added by linking in crtfastmath.o. This is done by the spec file which
# only does globbing for -ffast-math.
flags = [
"fno-trapping-math",
"funsafe-math-optimizations",
"ffinite-math-only",
"fno-signed-zeros",
"fno-math-errno",
]
flags.append("fno-finite-math-only")
if not config.cpp.enable_unsafe_math_opt_flag:
flags.append("fno-unsafe-math-optimizations")
flags.append(f"ffp-contract={config.cpp.enable_floating_point_contract_flag}")
if is_gcc():
flags.append("fexcess-precision=fast")
return flags
def _get_inductor_debug_symbol_cflags() -> tuple[list[str], list[str]]:
"""
When we turn on generate debug symbol.
On Windows, it should create a [module_name].pdb file. It helps debug by WinDBG.
On Linux, it should create some debug sections in binary file.
"""
cflags: list[str] = []
ldflags: list[str] = []
if _IS_WINDOWS:
cflags = ["ZI", "_DEBUG"]
ldflags = ["DEBUG", "ASSEMBLYDEBUG ", "OPT:REF", "OPT:ICF"]
else:
cflags.append("g")
return cflags, ldflags
def _get_optimization_cflags(
cpp_compiler: str, min_optimize: bool = False
) -> tuple[list[str], list[str]]:
cflags: list[str] = []
ldflags: list[str] = []
should_use_optimized_flags = not (
config.aot_inductor.debug_compile
or os.environ.get("TORCHINDUCTOR_DEBUG_COMPILE", "0") == "1"
)
should_add_debug_symbol_flags = (
config.aot_inductor.debug_compile
or config.aot_inductor.debug_symbols
or os.environ.get("TORCHINDUCTOR_DEBUG_COMPILE", "0") == "1"
or os.environ.get("TORCHINDUCTOR_DEBUG_SYMBOL", "0") == "1"
)
if should_use_optimized_flags:
if _IS_WINDOWS:
cflags += ["O1" if min_optimize else "O2"]
else:
cflags += [
config.aot_inductor.compile_wrapper_opt_level if min_optimize else "O3",
"DNDEBUG",
]
else:
if _IS_WINDOWS:
cflags += ["Od", "Ob0", "Oy-"]
else:
cflags += ["O0"]
if should_add_debug_symbol_flags:
debug_cflags, debug_ldflags = _get_inductor_debug_symbol_cflags()
cflags += debug_cflags
ldflags += debug_ldflags
cflags += _get_ffast_math_flags()
if _IS_WINDOWS:
pass
else:
if sys.platform != "darwin":
# on macos, unknown argument: '-fno-tree-loop-vectorize'
if _is_gcc(cpp_compiler):
cflags.append("fno-tree-loop-vectorize")
# https://stackoverflow.com/questions/65966969/why-does-march-native-not-work-on-apple-m1
# `-march=native` is unrecognized option on M1
if not config.is_fbcode():
if platform.machine() == "ppc64le":
cflags.append("mcpu=native")
elif platform.machine() == "riscv64":
cflags.append("march=rv64gc")
elif platform.machine() == "riscv32":
cflags.append("march=rv32gc")
else:
cflags.append("march=native")
if config.aot_inductor.enable_lto and _is_clang(cpp_compiler):
cflags.append("flto=thin")
return cflags, ldflags
def _get_shared_cflags(do_link: bool) -> list[str]:
if _IS_WINDOWS:
"""
MSVC `/MD` using python `ucrtbase.dll` lib as runtime.
https://learn.microsoft.com/en-us/cpp/c-runtime-library/crt-library-features?view=msvc-170
"""
return ["DLL", "MD"]
if platform.system() == "Darwin" and "clang" in get_cpp_compiler():
# This causes undefined symbols to behave the same as linux
return ["shared", "fPIC", "undefined dynamic_lookup"]
flags = []
if do_link:
flags.append("shared")
flags.append("fPIC")
return flags
def get_cpp_options(
cpp_compiler: str,
do_link: bool,
warning_all: bool = True,
extra_flags: Sequence[str] = (),
min_optimize: bool = False,
) -> tuple[list[str], list[str], list[str], list[str], list[str], list[str], list[str]]:
definitions: list[str] = []
include_dirs: list[str] = []
cflags: list[str] = []
ldflags: list[str] = []
libraries_dirs: list[str] = []
libraries: list[str] = []
passthrough_args: list[str] = []
opt_cflags, opt_ldflags = _get_optimization_cflags(cpp_compiler, min_optimize)
cflags = (
opt_cflags
+ _get_shared_cflags(do_link)
+ _get_warning_all_cflag(warning_all)
+ _get_cpp_std_cflag()
+ _get_os_related_cpp_cflags(cpp_compiler)
)
definitions += _get_os_related_cpp_definitions(cpp_compiler)
if not _IS_WINDOWS and config.aot_inductor.enable_lto and _is_clang(cpp_compiler):
ldflags.append("fuse-ld=lld")
ldflags.append("flto=thin")
passthrough_args.append(" ".join(extra_flags))
if config.aot_inductor.cross_target_platform == "windows":
passthrough_args.extend(["-static-libstdc++", "-static-libgcc"])
if check_mingw_win32_flavor(MINGW_GXX) == "posix":
passthrough_args.append("-Wl,-Bstatic -lwinpthread -Wl,-Bdynamic")
return (
definitions,
include_dirs,
cflags,
ldflags + opt_ldflags,
libraries_dirs,
libraries,
passthrough_args,
)
| BuildOptionsBase |
python | tiangolo__fastapi | tests/test_pydantic_v1_v2_multifile/modelsv2.py | {
"start": 65,
"end": 115
} | class ____(BaseModel):
new_sub_name: str
| SubItem |
python | astropy__astropy | astropy/constants/codata2018.py | {
"start": 354,
"end": 477
} | class ____(Constant):
default_reference = "CODATA 2018"
_registry = {}
_has_incompatible_units = set()
| CODATA2018 |
python | getsentry__sentry | src/sentry/integrations/gitlab/webhooks.py | {
"start": 5012,
"end": 7579
} | class ____(GitlabWebhook):
"""
Handle Merge Request Hook
See https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#merge-request-events
"""
@property
def event_type(self) -> IntegrationWebhookEventType:
return IntegrationWebhookEventType.PULL_REQUEST
def __call__(self, event: Mapping[str, Any], **kwargs):
if not (
(organization := kwargs.get("organization"))
and (integration := kwargs.get("integration"))
):
raise ValueError("Organization and integration must be provided")
repo = self.get_repo(integration, organization, event)
if repo is None:
return
# while we're here, make sure repo data is up to date
self.update_repo_data(repo, event)
try:
number = event["object_attributes"]["iid"]
title = event["object_attributes"]["title"]
body = event["object_attributes"]["description"]
created_at = event["object_attributes"]["created_at"]
merge_commit_sha = event["object_attributes"]["merge_commit_sha"]
last_commit = event["object_attributes"]["last_commit"]
author_email = None
author_name = None
if last_commit:
author_email = last_commit["author"]["email"]
author_name = last_commit["author"]["name"]
except KeyError as e:
logger.info(
"gitlab.webhook.invalid-merge-data",
extra={"integration_id": integration.id, "error": str(e)},
)
logger.exception("Invalid merge data.")
# TODO(mgaeta): This try/catch is full of reportUnboundVariable errors.
return
if not author_email:
raise Http404()
author = CommitAuthor.objects.get_or_create(
organization_id=organization.id, email=author_email, defaults={"name": author_name}
)[0]
author.preload_users()
try:
PullRequest.objects.update_or_create(
organization_id=organization.id,
repository_id=repo.id,
key=number,
defaults={
"title": title,
"author": author,
"message": body,
"merge_commit_sha": merge_commit_sha,
"date_added": parse_date(created_at).astimezone(timezone.utc),
},
)
except IntegrityError:
pass
| MergeEventWebhook |
python | fastai__fastai | fastai/vision/learner.py | {
"start": 9055,
"end": 19974
} | class ____(nn.Module):
def __init__(self, model, pretrained:bool=True, cut=None, n_in:int=3):
super().__init__()
self.needs_pool = model.default_cfg.get('pool_size', None) is not None
self.model = model if cut is None else cut_model(model, cut)
def forward(self,x): return self.model.forward_features(x) if self.needs_pool else self.model(x)
# %% ../../nbs/21_vision.learner.ipynb 36
def create_timm_model(arch, n_out, cut=None, pretrained=True, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
concat_pool=True, pool=True, lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None, **kwargs):
"Create custom architecture using `arch`, `n_in` and `n_out` from the `timm` library"
model = timm.create_model(arch, pretrained=pretrained, num_classes=0, in_chans=n_in, **kwargs)
body = TimmBody(model, pretrained, None, n_in)
nf = body.model.num_features
res = add_head(body, nf, n_out, init=init, head=custom_head, concat_pool=concat_pool, pool=body.needs_pool,
lin_ftrs=lin_ftrs, ps=ps, first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range)
return res,model.default_cfg
# %% ../../nbs/21_vision.learner.ipynb 39
def _add_norm(dls, meta, pretrained, n_in=3):
if not pretrained: return
stats = meta.get('stats')
if stats is None: return
if n_in != len(stats[0]): return
if not dls.after_batch.fs.filter(risinstance(Normalize)):
dls.add_tfms([Normalize.from_stats(*stats)],'after_batch')
# %% ../../nbs/21_vision.learner.ipynb 41
def _timm_norm(dls, cfg, pretrained, n_in=3):
if not pretrained: return
if n_in != len(cfg['mean']): return
if not dls.after_batch.fs.filter(risinstance(Normalize)):
tfm = Normalize.from_stats(cfg['mean'],cfg['std'])
dls.add_tfms([tfm],'after_batch')
# %% ../../nbs/21_vision.learner.ipynb 42
@delegates(create_vision_model)
def vision_learner(dls, arch, normalize=True, n_out=None, pretrained=True, weights=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# model & head args
cut=None, init=nn.init.kaiming_normal_, custom_head=None, concat_pool=True, pool=True,
lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None, **kwargs):
"Build a vision learner from `dls` and `arch`"
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
meta = model_meta.get(arch, _default_meta)
model_args = dict(init=init, custom_head=custom_head, concat_pool=concat_pool, pool=pool, lin_ftrs=lin_ftrs, ps=ps,
first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range, **kwargs)
n_in = kwargs['n_in'] if 'n_in' in kwargs else 3
if isinstance(arch, str):
model,cfg = create_timm_model(arch, n_out, default_split, pretrained, **model_args)
if normalize: _timm_norm(dls, cfg, pretrained, n_in)
else:
if normalize: _add_norm(dls, meta, pretrained, n_in)
model = create_vision_model(arch, n_out, pretrained=pretrained, weights=weights, **model_args)
splitter = ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn, moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
# %% ../../nbs/21_vision.learner.ipynb 51
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, weights=None, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
meta = model_meta.get(arch, _default_meta)
if parse(torchvision.__version__) >= parse('0.13') and 'weights' in meta:
if weights is not None and not pretrained:
warn(f'{pretrained=} but `weights` are set {weights=}. To randomly initialize set `pretrained=False` & `weights=None`')
model = arch(weights=meta['weights'] if (weights is None and pretrained) else weights)
else:
model = arch(pretrained=pretrained)
body = create_body(model, n_in, pretrained, ifnone(cut, meta['cut']))
model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
return model
# %% ../../nbs/21_vision.learner.ipynb 54
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, weights=None, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95), **kwargs):
"Build a unet learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
n_in = kwargs['n_in'] if 'n_in' in kwargs else 3
if normalize: _add_norm(dls, meta, pretrained, n_in)
n_out = ifnone(n_out, get_c(dls))
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
img_size = dls.one_batch()[0].shape[-2:]
assert img_size, "image size could not be inferred from data"
model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, weights=weights, **kwargs)
splitter = ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
# %% ../../nbs/21_vision.learner.ipynb 59
def create_cnn_model(*args, **kwargs):
"Deprecated name for `create_vision_model` -- do not use"
warn("`create_cnn_model` has been renamed to `create_vision_model` -- please update your code")
return create_vision_model(*args, **kwargs)
# %% ../../nbs/21_vision.learner.ipynb 60
def cnn_learner(*args, **kwargs):
"Deprecated name for `vision_learner` -- do not use"
warn("`cnn_learner` has been renamed to `vision_learner` -- please update your code")
return vision_learner(*args, **kwargs)
# %% ../../nbs/21_vision.learner.ipynb 62
@dispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# %% ../../nbs/21_vision.learner.ipynb 63
@dispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# %% ../../nbs/21_vision.learner.ipynb 64
@dispatch
def show_results(x:TensorImage, y:TensorMask|TensorPoint|TensorBBox, samples, outs, ctxs=None, max_n=6,
nrows=None, ncols=1, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True,
title='Target/Prediction')
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
for o in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
return ctxs
# %% ../../nbs/21_vision.learner.ipynb 65
@dispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
return ctxs
# %% ../../nbs/21_vision.learner.ipynb 66
@dispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
# %% ../../nbs/21_vision.learner.ipynb 67
@dispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
# %% ../../nbs/21_vision.learner.ipynb 68
@dispatch
def plot_top_losses(x:TensorImage, y:TensorMask, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axes = get_grid(len(samples)*3, nrows=len(samples), ncols=3, figsize=figsize, flatten=False, title="Input | Target | Prediction")
if axes.ndim == 1: axes = (axes,)
titles = ["input", "target", "pred"]
for axs,s,o,l in zip(axes, samples, outs, losses):
imgs = (s[0], s[1], o[0])
for ax,im,title in zip(axs, imgs, titles):
if title=="pred": title += f"; loss = {l.item():.4f}"
im.show(ctx=ax, **kwargs)
ax.set_title(title)
| TimmBody |
python | mlflow__mlflow | mlflow/sklearn/__init__.py | {
"start": 21548,
"end": 25859
} | class ____(pickle.PicklingError):
"""
Exception for describing error raised during pickling custom sklearn estimator
"""
def __init__(self, sk_model, original_exception):
"""
Args:
sk_model: The custom sklearn model to be pickled
original_exception: The original exception raised
"""
super().__init__(
f"Pickling custom sklearn model {sk_model.__class__.__name__} failed "
f"when saving model: {original_exception}"
)
self.original_exception = original_exception
def _dump_model(pickle_lib, sk_model, out):
try:
# Using python's default protocol to optimize compatibility.
# Otherwise cloudpickle uses latest protocol leading to incompatibilities.
# See https://github.com/mlflow/mlflow/issues/5419
pickle_lib.dump(sk_model, out, protocol=pickle.DEFAULT_PROTOCOL)
except (pickle.PicklingError, TypeError, AttributeError) as e:
if sk_model.__class__ not in _gen_estimators_to_patch():
raise _SklearnCustomModelPicklingError(sk_model, e)
else:
raise
def _save_model(sk_model, output_path, serialization_format):
"""
Args:
sk_model: The scikit-learn model to serialize.
output_path: The file path to which to write the serialized model.
serialization_format: The format in which to serialize the model. This should be one of
the following: ``mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE`` or
``mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE``.
"""
with open(output_path, "wb") as out:
if serialization_format == SERIALIZATION_FORMAT_PICKLE:
_dump_model(pickle, sk_model, out)
elif serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE:
import cloudpickle
_dump_model(cloudpickle, sk_model, out)
else:
raise MlflowException(
message=f"Unrecognized serialization format: {serialization_format}",
error_code=INTERNAL_ERROR,
)
def load_model(model_uri, dst_path=None):
"""
Load a scikit-learn model from a local file or a run.
Args:
model_uri: The location, in URI format, of the MLflow model, for example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
Returns:
A scikit-learn model.
.. code-block:: python
:caption: Example
import mlflow.sklearn
sk_model = mlflow.sklearn.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2/sk_models")
# use Pandas DataFrame to make predictions
pandas_df = ...
predictions = sk_model.predict(pandas_df)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
sklearn_model_artifacts_path = os.path.join(local_model_path, flavor_conf["pickled_model"])
serialization_format = flavor_conf.get("serialization_format", SERIALIZATION_FORMAT_PICKLE)
return _load_model_from_local_file(
path=sklearn_model_artifacts_path, serialization_format=serialization_format
)
# The `_apis_autologging_disabled` contains APIs which is incompatible with autologging,
# when user call these APIs, autolog is temporarily disabled.
_apis_autologging_disabled = [
"cross_validate",
"cross_val_predict",
"cross_val_score",
"learning_curve",
"permutation_test_score",
"validation_curve",
]
| _SklearnCustomModelPicklingError |
python | bokeh__bokeh | src/bokeh/models/ui/icons.py | {
"start": 3545,
"end": 3866
} | class ____(Icon):
""" SVG icons with inline definitions. """
# explicit __init__ to support Init signatures
def __init__(self, svg: Init[str] = Intrinsic, **kwargs: Any) -> None:
super().__init__(svg=svg, **kwargs)
svg = Required(String, help="""
The SVG definition of an icon.
""")
| SVGIcon |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0008_add_subproject_alias_prefix.py | {
"start": 100,
"end": 485
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0007_migrate_canonical_data"),
]
operations = [
migrations.AddField(
model_name="projectrelationship",
name="alias",
field=models.CharField(max_length=255, null=True, verbose_name="Alias", blank=True),
),
]
| Migration |
python | getsentry__sentry | src/sentry/issues/endpoints/group_integration_details.py | {
"start": 2516,
"end": 17393
} | class ____(GroupEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
"POST": ApiPublishStatus.UNKNOWN,
"PUT": ApiPublishStatus.UNKNOWN,
"DELETE": ApiPublishStatus.UNKNOWN,
}
def get(self, request: Request, group, integration_id) -> Response:
"""
Retrieves the config needed to either link or create an external issue for a group.
"""
if not request.user.is_authenticated:
return Response(status=400)
elif not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
# Keep link/create separate since create will likely require
# many external API calls that aren't necessary if the user is
# just linking
action = request.GET.get("action")
if action not in {"link", "create"}:
return Response({"detail": "Action is required and should be either link or create"})
organization_id = group.project.organization_id
result = integration_service.organization_context(
organization_id=organization_id, integration_id=integration_id
)
integration = result.integration
org_integration = result.organization_integration
if not integration or not org_integration:
return Response(status=404)
if not self._has_issue_feature_on_integration(integration):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
installation = self._get_installation(integration, organization_id)
try:
if action == "link":
config = installation.get_link_issue_config(group, params=request.GET)
elif action == "create":
config = installation.get_create_issue_config(
group, request.user, params=request.GET
)
else:
raise AssertionError("unreachable")
except IntegrationError as e:
return Response({"detail": str(e)}, status=400)
return Response(
serialize(
integration,
request.user,
IntegrationIssueConfigSerializer(group, action, config),
organization_id=organization_id,
)
)
def post(self, request: Request, group, integration_id) -> Response:
"""
Creates a new external issue and links it to a group.
"""
if not request.user.is_authenticated:
return Response(status=400)
elif not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
organization_id = group.project.organization_id
result = integration_service.organization_context(
organization_id=organization_id, integration_id=integration_id
)
integration = result.integration
org_integration = result.organization_integration
if not integration or not org_integration:
return Response(status=404)
if not self._has_issue_feature_on_integration(integration):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
installation = self._get_installation(integration, organization_id)
with ProjectManagementEvent(
action_type=ProjectManagementActionType.CREATE_EXTERNAL_ISSUE_VIA_ISSUE_DETAIL,
integration=integration,
).capture() as lifecycle:
lifecycle.add_extras(
{
"provider": integration.provider,
"integration_id": integration.id,
}
)
try:
data = installation.create_issue(request.data)
except IntegrationConfigurationError as exc:
lifecycle.record_halt(exc)
return Response({"non_field_errors": [str(exc)]}, status=400)
except IntegrationFormError as exc:
lifecycle.record_halt(exc)
return Response(exc.field_errors, status=400)
except IntegrationError as e:
lifecycle.record_failure(e)
return Response({"non_field_errors": [str(e)]}, status=400)
except IntegrationProviderError as exc:
lifecycle.record_halt(exc)
return Response(
{
"detail": f"Something went wrong while communicating with {integration.provider}"
},
status=503,
)
external_issue_key = installation.make_external_key(data)
external_issue, created = ExternalIssue.objects.get_or_create(
organization_id=organization_id,
integration_id=integration.id,
key=external_issue_key,
defaults={
"title": data.get("title"),
"description": data.get("description"),
"metadata": data.get("metadata"),
},
)
try:
with transaction.atomic(router.db_for_write(GroupLink)):
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
except IntegrityError:
return Response({"detail": "That issue is already linked"}, status=400)
if created:
integration_issue_created.send_robust(
integration=integration,
organization=group.project.organization,
user=request.user,
sender=self.__class__,
)
installation.store_issue_last_defaults(group.project, request.user, request.data)
self.create_issue_activity(request, group, installation, external_issue, new=True)
# TODO(jess): return serialized issue
url = data.get("url") or installation.get_issue_url(external_issue.key)
context = {
"id": external_issue.id,
"key": external_issue.key,
"url": url,
"integrationId": external_issue.integration_id,
"displayName": installation.get_issue_display_name(external_issue),
}
return Response(context, status=201)
def put(self, request: Request, group, integration_id) -> Response:
"""
Links an existing external issue to a group.
"""
if not request.user.is_authenticated:
return Response(status=400)
elif not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
external_issue_id = request.data.get("externalIssue")
if not external_issue_id:
return Response({"externalIssue": ["Issue ID is required"]}, status=400)
organization_id = group.project.organization_id
result = integration_service.organization_context(
organization_id=organization_id, integration_id=integration_id
)
integration = result.integration
org_integration = result.organization_integration
if not integration or not org_integration:
return Response(status=404)
with ProjectManagementEvent(
action_type=ProjectManagementActionType.LINK_EXTERNAL_ISSUE,
integration=integration,
).capture() as lifecycle:
if not self._has_issue_feature_on_integration(integration):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
installation = self._get_installation(integration, organization_id)
try:
data = installation.get_issue(external_issue_id, data=request.data)
except IntegrationFormError as exc:
lifecycle.record_halt(exc)
return Response(exc.field_errors, status=400)
except IntegrationError as e:
lifecycle.record_failure(e)
return Response({"non_field_errors": [str(e)]}, status=400)
defaults = {
"title": data.get("title"),
"description": data.get("description"),
"metadata": data.get("metadata"),
}
external_issue_key = installation.make_external_key(data)
external_issue, created = ExternalIssue.objects.get_or_create(
organization_id=organization_id,
integration_id=integration.id,
key=external_issue_key,
defaults=defaults,
)
if created:
integration_issue_linked.send_robust(
integration=integration,
organization=group.project.organization,
user=request.user,
sender=self.__class__,
)
else:
external_issue.update(**defaults)
installation.store_issue_last_defaults(group.project, request.user, request.data)
try:
installation.after_link_issue(external_issue, data=request.data)
except IntegrationFormError as exc:
lifecycle.record_halt(exc)
return Response(exc.field_errors, status=400)
except IntegrationError as e:
lifecycle.record_failure(e)
return Response({"non_field_errors": [str(e)]}, status=400)
try:
with transaction.atomic(router.db_for_write(GroupLink)):
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
except IntegrityError as exc:
lifecycle.record_halt(exc)
return Response({"non_field_errors": ["That issue is already linked"]}, status=400)
self.create_issue_activity(request, group, installation, external_issue, new=False)
# TODO(jess): would be helpful to return serialized external issue
# once we have description, title, etc
url = data.get("url") or installation.get_issue_url(external_issue.key)
context = {
"id": external_issue.id,
"key": external_issue.key,
"url": url,
"integrationId": external_issue.integration_id,
"displayName": installation.get_issue_display_name(external_issue),
}
return Response(context, status=201)
def delete(self, request: Request, group, integration_id) -> Response:
"""
Deletes a link between a group and an external issue.
"""
if not self._has_issue_feature(group.organization, request.user):
return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)
# note here externalIssue refers to `ExternalIssue.id` whereas above
# it refers to the id from the provider
external_issue_id = request.GET.get("externalIssue")
if not external_issue_id:
return Response({"detail": "External ID required"}, status=400)
organization_id = group.project.organization_id
result = integration_service.organization_context(
organization_id=organization_id, integration_id=integration_id
)
integration = result.integration
org_integration = result.organization_integration
if not integration or not org_integration:
return Response(status=404)
if not self._has_issue_feature_on_integration(integration):
return Response(
{"detail": "This feature is not supported for this integration."}, status=400
)
try:
external_issue = ExternalIssue.objects.get(
organization_id=organization_id, integration_id=integration.id, id=external_issue_id
)
except ExternalIssue.DoesNotExist:
return Response(status=404)
with transaction.atomic(router.db_for_write(GroupLink)):
GroupLink.objects.get_group_issues(group, external_issue_id).delete()
# check if other groups reference this external issue
# and delete if not
if not GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.issue, linked_id=external_issue_id
).exists():
external_issue.delete()
return Response(status=204)
def _has_issue_feature(self, organization, user) -> bool:
has_issue_basic = features.has(
"organizations:integrations-issue-basic", organization, actor=user
)
has_issue_sync = features.has(
"organizations:integrations-issue-sync", organization, actor=user
)
return has_issue_sync or has_issue_basic
def _has_issue_feature_on_integration(self, integration: RpcIntegration) -> bool:
return integration.has_feature(
feature=IntegrationFeatures.ISSUE_BASIC
) or integration.has_feature(feature=IntegrationFeatures.ISSUE_SYNC)
def _get_installation(
self, integration: RpcIntegration, organization_id: int
) -> IssueBasicIntegration:
installation = integration.get_installation(organization_id=organization_id)
if not isinstance(installation, IssueBasicIntegration):
raise ValueError(installation)
return installation
def create_issue_activity(
self,
request: Request,
group: Group,
installation: IssueBasicIntegration,
external_issue: ExternalIssue,
new: bool,
):
issue_information = {
"title": external_issue.title,
"provider": installation.model.get_provider().name,
"location": installation.get_issue_url(external_issue.key),
"label": installation.get_issue_display_name(external_issue) or external_issue.key,
"new": new,
}
Activity.objects.create(
project=group.project,
group=group,
type=ActivityType.CREATE_ISSUE.value,
user_id=request.user.id,
data=issue_information,
)
| GroupIntegrationDetailsEndpoint |
python | django__django | tests/admin_views/test_actions.py | {
"start": 752,
"end": 19377
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.s1 = ExternalSubscriber.objects.create(
name="John Doe", email="john@example.org"
)
cls.s2 = Subscriber.objects.create(
name="Max Mustermann", email="max@example.org"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_model_admin_custom_action(self):
"""A custom action defined in a ModelAdmin method."""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "mail_admin",
"index": 0,
}
self.client.post(
reverse("admin:admin_views_subscriber_changelist"), action_data
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a ModelAdmin action")
def test_model_admin_default_delete_action(self):
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk],
"action": "delete_selected",
"index": 0,
}
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk],
"action": "delete_selected",
"post": "yes",
}
confirmation = self.client.post(
reverse("admin:admin_views_subscriber_changelist"), action_data
)
self.assertIsInstance(confirmation, TemplateResponse)
self.assertContains(
confirmation, "Are you sure you want to delete the selected subscribers?"
)
self.assertContains(confirmation, "<h1>Delete multiple objects</h1>")
self.assertContains(confirmation, "<h2>Summary</h2>")
self.assertContains(confirmation, "<li>Subscribers: 2</li>")
self.assertContains(confirmation, "<li>External subscribers: 1</li>")
self.assertContains(confirmation, ACTION_CHECKBOX_NAME, count=2)
with CaptureQueriesContext(connection) as ctx:
self.client.post(
reverse("admin:admin_views_subscriber_changelist"),
delete_confirmation_data,
)
# Log entries are inserted in bulk.
self.assertEqual(
len(
[
q["sql"]
for q in ctx.captured_queries
if q["sql"].startswith("INSERT")
]
),
1,
)
self.assertEqual(Subscriber.objects.count(), 0)
def test_default_delete_action_nonexistent_pk(self):
self.assertFalse(Subscriber.objects.filter(id=9998).exists())
action_data = {
ACTION_CHECKBOX_NAME: ["9998"],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_subscriber_changelist"), action_data
)
self.assertContains(
response, "Are you sure you want to delete the selected subscribers?"
)
self.assertContains(response, "<ul></ul>", html=True)
@override_settings(USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=3)
def test_non_localized_pk(self):
"""
If USE_THOUSAND_SEPARATOR is set, the ids for the objects selected for
deletion are rendered without separators.
"""
s = ExternalSubscriber.objects.create(id=9999)
action_data = {
ACTION_CHECKBOX_NAME: [s.pk, self.s2.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_subscriber_changelist"), action_data
)
self.assertTemplateUsed(response, "admin/delete_selected_confirmation.html")
self.assertContains(response, 'value="9999"') # Instead of 9,999
self.assertContains(response, 'value="%s"' % self.s2.pk)
def test_model_admin_default_delete_action_protected(self):
"""
The default delete action where some related objects are protected
from deletion.
"""
q1 = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q1, answer="Because.")
a2 = Answer.objects.create(question=q1, answer="Yes.")
q2 = Question.objects.create(question="Wherefore?")
action_data = {
ACTION_CHECKBOX_NAME: [q1.pk, q2.pk],
"action": "delete_selected",
"index": 0,
}
delete_confirmation_data = action_data.copy()
delete_confirmation_data["post"] = "yes"
response = self.client.post(
reverse("admin:admin_views_question_changelist"), action_data
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
html=True,
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
html=True,
)
# A POST request to delete protected objects displays the page which
# says the deletion is prohibited.
response = self.client.post(
reverse("admin:admin_views_question_changelist"), delete_confirmation_data
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertEqual(Question.objects.count(), 2)
def test_model_admin_default_delete_action_no_change_url(self):
"""
The default delete action doesn't break if a ModelAdmin removes the
change_view URL (#20640).
"""
obj = UnchangeableObject.objects.create()
action_data = {
ACTION_CHECKBOX_NAME: obj.pk,
"action": "delete_selected",
"index": "0",
}
response = self.client.post(
reverse("admin:admin_views_unchangeableobject_changelist"), action_data
)
# No 500 caused by NoReverseMatch. The page doesn't display a link to
# the nonexistent change page.
self.assertContains(
response, "<li>Unchangeable object: %s</li>" % obj, 1, html=True
)
def test_delete_queryset_hook(self):
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk],
"action": "delete_selected",
"post": "yes",
"index": 0,
}
SubscriberAdmin.overridden = False
self.client.post(
reverse("admin:admin_views_subscriber_changelist"), delete_confirmation_data
)
# SubscriberAdmin.delete_queryset() sets overridden to True.
self.assertIs(SubscriberAdmin.overridden, True)
self.assertEqual(Subscriber.objects.count(), 0)
def test_delete_selected_uses_get_deleted_objects(self):
"""The delete_selected action uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
data = {
ACTION_CHECKBOX_NAME: [book.pk],
"action": "delete_selected",
"index": 0,
}
response = self.client.post(reverse("admin2:admin_views_book_changelist"), data)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
def test_custom_function_mail_action(self):
"""A custom action may be defined in a function."""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "external_mail",
"index": 0,
}
self.client.post(
reverse("admin:admin_views_externalsubscriber_changelist"), action_data
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a function action")
def test_custom_function_action_with_redirect(self):
"""Another custom action defined in a function."""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "redirect_to",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_externalsubscriber_changelist"), action_data
)
self.assertEqual(response.status_code, 302)
def test_default_redirect(self):
"""
Actions which don't return an HttpResponse are redirected to the same
page, retaining the querystring (which may contain changelist info).
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "external_mail",
"index": 0,
}
url = reverse("admin:admin_views_externalsubscriber_changelist") + "?o=1"
response = self.client.post(url, action_data)
self.assertRedirects(response, url)
def test_custom_function_action_streaming_response(self):
"""A custom action may return a StreamingHttpResponse."""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "download",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_externalsubscriber_changelist"), action_data
)
content = b"".join(list(response))
self.assertEqual(content, b"This is the content of the file")
self.assertEqual(response.status_code, 200)
def test_custom_function_action_no_perm_response(self):
"""A custom action may returns an HttpResponse with a 403 code."""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
"action": "no_perm",
"index": 0,
}
response = self.client.post(
reverse("admin:admin_views_externalsubscriber_changelist"), action_data
)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b"No permission to perform this action")
def test_actions_ordering(self):
"""Actions are ordered as expected."""
response = self.client.get(
reverse("admin:admin_views_externalsubscriber_changelist")
)
self.assertContains(
response,
"""<label>Action: <select name="action" required>
<option value="" selected>---------</option>
<option value="delete_selected">Delete selected external
subscribers</option>
<option value="redirect_to">Redirect to (Awesome action)</option>
<option value="external_mail">External mail (Another awesome
action)</option>
<option value="download">Download subscription</option>
<option value="no_perm">No permission to run</option>
</select>""",
html=True,
)
def test_model_without_action(self):
"""A ModelAdmin might not have any actions."""
response = self.client.get(
reverse("admin:admin_views_oldsubscriber_changelist")
)
self.assertIsNone(response.context["action_form"])
self.assertNotContains(
response,
'<input type="checkbox" class="action-select"',
msg_prefix="Found an unexpected action toggle checkboxbox in response",
)
self.assertNotContains(response, '<input type="checkbox" class="action-select"')
def test_model_without_action_still_has_jquery(self):
"""
A ModelAdmin without any actions still has jQuery included on the page.
"""
response = self.client.get(
reverse("admin:admin_views_oldsubscriber_changelist")
)
self.assertIsNone(response.context["action_form"])
self.assertContains(
response,
"jquery.min.js",
msg_prefix=(
"jQuery missing from admin pages for model with no admin actions"
),
)
def test_action_column_class(self):
"""The checkbox column class is present in the response."""
response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
self.assertIsNotNone(response.context["action_form"])
self.assertContains(response, "action-checkbox-column")
def test_multiple_actions_form(self):
"""
Actions come from the form whose submit button was pressed (#10618).
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk],
# Two different actions selected on the two forms...
"action": ["external_mail", "delete_selected"],
# ...but "go" was clicked on the top form.
"index": 0,
}
self.client.post(
reverse("admin:admin_views_externalsubscriber_changelist"), action_data
)
# The action sends mail rather than deletes.
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Greetings from a function action")
def test_media_from_actions_form(self):
"""
The action form's media is included in the changelist view's media.
"""
response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
media_path = MediaActionForm.Media.js[0]
self.assertIsInstance(response.context["action_form"], MediaActionForm)
self.assertIn("media", response.context)
self.assertIn(media_path, response.context["media"]._js)
self.assertContains(response, media_path)
def test_user_message_on_none_selected(self):
"""
User sees a warning when 'Go' is pressed and no items are selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [],
"action": "delete_selected",
"index": 0,
}
url = reverse("admin:admin_views_subscriber_changelist")
response = self.client.post(url, action_data)
self.assertRedirects(response, url, fetch_redirect_response=False)
response = self.client.get(response.url)
msg = (
"Items must be selected in order to perform actions on them. No items have "
"been changed."
)
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_user_message_on_no_action(self):
"""
User sees a warning when 'Go' is pressed and no action is selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk],
"action": "",
"index": 0,
}
url = reverse("admin:admin_views_subscriber_changelist")
response = self.client.post(url, action_data)
self.assertRedirects(response, url, fetch_redirect_response=False)
response = self.client.get(response.url)
self.assertContains(response, "No action selected.")
self.assertEqual(Subscriber.objects.count(), 2)
def test_selection_counter(self):
"""The selection counter is there."""
response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
self.assertContains(response, "0 of 2 selected")
def test_popup_actions(self):
"""Actions aren't shown in popups."""
changelist_url = reverse("admin:admin_views_subscriber_changelist")
response = self.client.get(changelist_url)
self.assertIsNotNone(response.context["action_form"])
response = self.client.get(changelist_url + "?%s" % IS_POPUP_VAR)
self.assertIsNone(response.context["action_form"])
def test_popup_template_response_on_add(self):
"""
Success on popups shall be rendered from template in order to allow
easy customization.
"""
response = self.client.post(
reverse("admin:admin_views_actor_add") + "?%s=1" % IS_POPUP_VAR,
{"name": "Troy McClure", "age": "55", IS_POPUP_VAR: "1"},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.template_name,
[
"admin/admin_views/actor/popup_response.html",
"admin/admin_views/popup_response.html",
"admin/popup_response.html",
],
)
self.assertTemplateUsed(response, "admin/popup_response.html")
def test_popup_template_response_on_change(self):
instance = Actor.objects.create(name="David Tennant", age=45)
response = self.client.post(
reverse("admin:admin_views_actor_change", args=(instance.pk,))
+ "?%s=1" % IS_POPUP_VAR,
{"name": "David Tennant", "age": "46", IS_POPUP_VAR: "1"},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.template_name,
[
"admin/admin_views/actor/popup_response.html",
"admin/admin_views/popup_response.html",
"admin/popup_response.html",
],
)
self.assertTemplateUsed(response, "admin/popup_response.html")
def test_popup_template_response_on_delete(self):
instance = Actor.objects.create(name="David Tennant", age=45)
response = self.client.post(
reverse("admin:admin_views_actor_delete", args=(instance.pk,))
+ "?%s=1" % IS_POPUP_VAR,
{IS_POPUP_VAR: "1"},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.template_name,
[
"admin/admin_views/actor/popup_response.html",
"admin/admin_views/popup_response.html",
"admin/popup_response.html",
],
)
self.assertTemplateUsed(response, "admin/popup_response.html")
def test_popup_template_escaping(self):
popup_response_data = json.dumps(
{
"new_value": "new_value\\",
"obj": "obj\\",
"value": "value\\",
}
)
context = {
"popup_response_data": popup_response_data,
}
output = render_to_string("admin/popup_response.html", context)
self.assertIn(r""value\\"", output)
self.assertIn(r""new_value\\"", output)
self.assertIn(r""obj\\"", output)
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminActionsTest |
python | numpy__numpy | numpy/lib/tests/test_io.py | {
"start": 12918,
"end": 23577
} | class ____:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
with temppath(suffix='.npy') as path:
path = Path(path)
np.save(path, v)
data = np.load(path)
assert_array_equal(data, v)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
@pytest.mark.parametrize("filename_type", [Path, str])
def test_file_roundtrip(self, filename_type):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(filename_type(name), a)
b = np.loadtxt(filename_type(name))
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
# Previous to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.str_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.str_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
if HAS_BZ2:
suffixes.append('.bz2')
if HAS_LZMA:
suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
encoding='UTF-16-LE', dtype=np.str_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.str_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.str_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, iotype):
# string type of fmt should not matter, see also gh-4053
a = np.array([1.])
s = iotype()
np.savetxt(s, a, fmt="%f")
s.seek(0)
if iotype is StringIO:
assert_equal(s.read(), "%f\n" % 1.)
else:
assert_equal(s.read(), b"%f\n" % 1.)
@pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work")
@pytest.mark.slow
@requires_memory(free_bytes=7e9)
@pytest.mark.thread_unsafe(reason="crashes with low memory")
def test_large_zip(self):
def check_large_zip(memoryerror_raised):
memoryerror_raised.value = False
try:
# The test takes at least 6GB of memory, writes a file larger
# than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
test_data = np.asarray([np.random.rand(
np.random.randint(50, 100), 4)
for i in range(800000)], dtype=object)
with tempdir() as tmpdir:
np.savez(os.path.join(tmpdir, 'test.npz'),
test_data=test_data)
except MemoryError:
memoryerror_raised.value = True
raise
# run in a subprocess to ensure memory is released on PyPy, see gh-15775
# Use an object in shared memory to re-raise the MemoryError exception
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
# Since Python 3.8, the default start method for multiprocessing has
# been changed from 'fork' to 'spawn' on macOS, causing inconsistency
# on memory sharing model, leading to failed test for check_large_zip
ctx = get_context('fork')
p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
p.start()
p.join()
if memoryerror_raised.value:
raise MemoryError("Child process raised a MemoryError exception")
# -9 indicates a SIGKILL, probably an OOM.
if p.exitcode == -9:
msg = "subprocess got a SIGKILL, apparently free memory was not sufficient"
pytest.xfail(msg)
assert p.exitcode == 0
| TestSaveTxt |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 41249,
"end": 41717
} | class ____(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ("x",)
| E08 |
python | ansible__ansible | lib/ansible/module_utils/_internal/_patches/_sys_intern_patch.py | {
"start": 252,
"end": 849
} | class ____(CallablePatch):
"""Patch `sys.intern` so that subclasses of `str` are accepted."""
target_container: t.ClassVar = sys
target_attribute = 'intern'
@classmethod
def is_patch_needed(cls) -> bool:
with contextlib.suppress(TypeError):
sys.intern(_CustomStr("x"))
return False
return True
def __call__(self, value: str):
if type(value) is not str and isinstance(value, str): # pylint: disable=unidiomatic-typecheck
value = str(value)
return type(self).unpatched_implementation(value)
| SysInternPatch |
python | milvus-io__pymilvus | pymilvus/orm/connections.py | {
"start": 1911,
"end": 20522
} | class ____(metaclass=SingleInstanceMetaClass):
"""Class for managing all connections of milvus. Used as a singleton in this module."""
def __init__(self) -> None:
"""Constructs a default milvus alias config
default config will be read from env: MILVUS_URI and MILVUS_CONN_ALIAS
with default value: default="localhost:19530"
Read default connection config from environment variable: MILVUS_URI.
Format is:
[scheme://][<user>@<password>]host[:<port>]
scheme is one of: http, https, or <empty>
Examples:
localhost
localhost:19530
test_user@localhost:19530
http://test_userlocalhost:19530
https://test_user:password@localhost:19530
"""
self._alias_config = {}
self._alias_handlers = {}
self._env_uri = None
if Config.MILVUS_URI != "":
address, parsed_uri = self.__parse_address_from_uri(Config.MILVUS_URI)
self._env_uri = (address, parsed_uri)
default_conn_config = {
"user": parsed_uri.username if parsed_uri.username is not None else "",
"address": address,
}
else:
default_conn_config = {
"user": "",
"address": f"{Config.DEFAULT_HOST}:{Config.DEFAULT_PORT}",
}
self.add_connection(**{Config.MILVUS_CONN_ALIAS: default_conn_config})
def __verify_host_port(self, host: str, port: Union[int, str]):
if not is_legal_host(host):
raise ConnectionConfigException(message=ExceptionsMessage.HostType)
if not is_legal_port(port):
raise ConnectionConfigException(message=ExceptionsMessage.PortType)
if not 0 <= int(port) < 65535:
msg = f"port number {port} out of range, valid range [0, 65535)"
raise ConnectionConfigException(message=msg)
def __parse_address_from_uri(self, uri: str) -> (str, parse.ParseResult):
illegal_uri_msg = (
"Illegal uri: [{}], expected form 'http[s]://[user:password@]example.com[:12345]'"
)
try:
parsed_uri = parse.urlparse(uri)
except Exception as e:
raise ConnectionConfigException(
message=f"{illegal_uri_msg.format(uri)}: <{type(e).__name__}, {e}>"
) from None
if len(parsed_uri.netloc) == 0:
raise ConnectionConfigException(message=f"{illegal_uri_msg.format(uri)}") from None
host = parsed_uri.hostname if parsed_uri.hostname is not None else Config.DEFAULT_HOST
default_port = "443" if parsed_uri.scheme == "https" else Config.DEFAULT_PORT
port = parsed_uri.port if parsed_uri.port is not None else default_port
addr = f"{host}:{port}"
self.__verify_host_port(host, port)
if not is_legal_address(addr):
raise ConnectionConfigException(message=illegal_uri_msg.format(uri))
return addr, parsed_uri
def add_connection(self, **kwargs):
"""Configures a milvus connection.
Addresses priority in kwargs: address, uri, host and port
:param kwargs:
* *address* (``str``) -- Optional. The actual address of Milvus instance.
Example address: "localhost:19530"
* *uri* (``str``) -- Optional. The uri of Milvus instance.
Example uri: "http://localhost:19530", "tcp:localhost:19530", "https://ok.s3.south.com:19530".
* *host* (``str``) -- Optional. The host of Milvus instance.
Default at "localhost", PyMilvus will fill in the default host
if only port is provided.
* *port* (``str/int``) -- Optional. The port of Milvus instance.
Default at 19530, PyMilvus will fill in the default port if only host is provided.
Example::
connections.add_connection(
default={"host": "localhost", "port": "19530"},
dev1={"host": "localhost", "port": "19531"},
dev2={"uri": "http://random.com/random"},
dev3={"uri": "http://localhost:19530"},
dev4={"uri": "tcp://localhost:19530"},
dev5={"address": "localhost:19530"},
prod={"uri": "http://random.random.random.com:19530"},
)
"""
for alias, config in kwargs.items():
addr, parsed_uri = self.__get_full_address(
config.get("address", ""),
config.get("uri", ""),
config.get("host", ""),
config.get("port", ""),
)
if alias in self._alias_handlers and self._alias_config[alias].get("address") != addr:
raise ConnectionConfigException(message=ExceptionsMessage.ConnDiffConf % alias)
alias_config = {
"address": addr,
"user": config.get("user", ""),
}
if parsed_uri is not None and parsed_uri.scheme == "https":
alias_config["secure"] = True
self._alias_config[alias] = alias_config
def __get_full_address(
self,
address: str = "",
uri: str = "",
host: str = "",
port: str = "",
) -> (str, parse.ParseResult):
if address != "":
if not is_legal_address(address):
raise ConnectionConfigException(
message=f"Illegal address: {address}, should be in form 'localhost:19530'"
)
return address, None
if uri != "":
if isinstance(uri, str) and uri.startswith("unix:"):
return uri, None
address, parsed = self.__parse_address_from_uri(uri)
return address, parsed
_host = host if host != "" else Config.DEFAULT_HOST
_port = port if port != "" else Config.DEFAULT_PORT
self.__verify_host_port(_host, _port)
addr = f"{_host}:{_port}"
if not is_legal_address(addr):
raise ConnectionConfigException(
message=f"Illegal host: {host} or port: {port}, should be in form of '111.1.1.1', '19530'"
)
return addr, None
def disconnect(self, alias: str):
"""Disconnects connection from the registry.
:param alias: The name of milvus connection
:type alias: str
"""
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
if alias in self._alias_handlers:
self._alias_handlers.pop(alias).close()
async def async_disconnect(self, alias: str):
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
if alias in self._alias_handlers:
await self._alias_handlers.pop(alias).close()
async def async_remove_connection(self, alias: str):
await self.async_disconnect(alias)
self._alias_config.pop(alias, None)
def remove_connection(self, alias: str):
"""Removes connection from the registry.
:param alias: The name of milvus connection
:type alias: str
"""
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
self.disconnect(alias)
self._alias_config.pop(alias, None)
def connect(
self,
alias: str = Config.MILVUS_CONN_ALIAS,
user: str = "",
password: str = "",
db_name: str = "default",
token: str = "",
_async: bool = False,
**kwargs,
) -> None:
"""Constructs a milvus connection and register it under given alias.
Args:
alias (str): Default to "default". The name of connection. Each alias corresponds to one
connection.
user (str, Optional): The user of milvus server.
password (str, Optional): The password of milvus server.
token (str, Optional): Serving as the key for authentication.
db_name (str): The database name of milvus server.
timeout (float, Optional) The timeout for the connection. Default is 10 seconds.
Unit: second
**kwargs:
* address (str, Optional) -- The actual address of Milvus instance.
Example: "localhost:19530"
* uri (str, Recommanded) -- The uri of Milvus instance.
Example uri: "http://localhost:19530", "tcp:localhost:19530", "https://ok.s3.south.com:19530".
* host (str, Optional) -- The host of Milvus instance. Default at "localhost",
PyMilvus will fill in the default host if only port is provided.
* port (str/int, Optional) -- The port of Milvus instance. Default at 19530,
PyMilvus will fill in the default port if only host is provided.
* keep_alive (bool, Optional) -- Default is false. If set to true,
client will keep an alive connection.
* secure (bool, Optional) -- Default is false. If set to true, tls will be enabled.
If use "https://" scheme in uri, secure will be true.
* client_key_path (str, Optional) -- Needed when use tls two-way authentication.
* client_pem_path (str, Optional) -- Needed when use tls two-way authentication.
* ca_pem_path (str, Optional) -- Needed when use tls two-way authentication.
* server_pem_path (str, Optional) -- Needed when use tls one-way authentication.
* server_name (str, Optional) -- Needed when enabled tls.
Example:
>>> from pymilvus import connections
>>> connections.connect("test", uri="http://localhost:19530", token="abcdefg")
Raises:
ConnectionConfigException: If connection parameters are illegal.
MilvusException: If anything goes wrong.
"""
if kwargs.get("uri") and parse.urlparse(kwargs["uri"]).scheme.lower() not in [
"unix",
"http",
"https",
"tcp",
"grpc",
]:
# start milvuslite
if not kwargs["uri"].endswith(".db"):
raise ConnectionConfigException(
message=f"uri: {kwargs['uri']} is illegal, needs start with [unix, http, https, tcp] or a local file endswith [.db]"
)
logger.info(f"Pass in the local path {kwargs['uri']}, and run it using milvus-lite")
parent_path = pathlib.Path(kwargs["uri"]).parent
if not parent_path.is_dir():
raise ConnectionConfigException(
message=f"Open local milvus failed, dir: {parent_path} not exists"
)
# ruff: noqa: PLC0415
try:
from milvus_lite.server_manager import (
server_manager_instance,
)
except ImportError as e:
raise ConnectionConfigException(
message="milvus-lite is required for local database connections. "
"Please install it with: pip install pymilvus[milvus_lite]"
) from e
local_uri = server_manager_instance.start_and_get_uri(kwargs["uri"])
if local_uri is None:
raise ConnectionConfigException(message="Open local milvus failed")
kwargs["uri"] = local_uri
# kwargs_copy is used for auto reconnect
kwargs_copy = copy.deepcopy(kwargs)
kwargs_copy["user"] = user
kwargs_copy["password"] = password
kwargs_copy["db_name"] = db_name
kwargs_copy["token"] = token
def connect_milvus(**kwargs):
gh = GrpcHandler(**kwargs) if not _async else AsyncGrpcHandler(**kwargs)
config_to_keep = {
k: v
for k, v in kwargs.items()
if k not in ["password", "token", "db_name", "keep_alive"]
}
self._alias_handlers[alias] = gh
self._alias_config[alias] = config_to_keep
t = kwargs.get("timeout")
timeout = t if isinstance(t, (int, float)) else Config.MILVUS_CONN_TIMEOUT
if not _async:
try:
gh._wait_for_channel_ready(timeout=timeout)
if kwargs.pop("keep_alive", False):
gh.register_reconnect_handler(ReconnectHandler(self, alias, kwargs_copy))
except Exception as e:
self.remove_connection(alias)
raise e from e
def with_config(config: Tuple) -> bool:
return any(c != "" for c in config)
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
config = (
kwargs.pop("address", ""),
kwargs.pop("uri", ""),
kwargs.pop("host", ""),
kwargs.pop("port", ""),
)
# Make sure passed in None doesnt break
user, password, token = str(user) or "", str(password) or "", str(token) or ""
# 1st Priority: connection from params
if with_config(config):
addr, parsed_uri = self.__get_full_address(*config)
kwargs["address"] = addr
if self.has_connection(alias) and self._alias_config[alias].get("address") != addr:
raise ConnectionConfigException(message=ExceptionsMessage.ConnDiffConf % alias)
if parsed_uri is not None:
# Extract user and password from uri
user = parsed_uri.username or user
password = parsed_uri.password or password
# Extract db_name from URI path only if appropriate
# Priority:
# 1. If db_name is explicitly provided and not empty -> use it
# 2. If db_name is empty string and URI has path -> use URI path
# 3. If db_name is empty string and URI has no path -> use "default"
if db_name == "":
group = [segment for segment in parsed_uri.path.split("/") if segment]
# Use first path segment if group exists and fall back to "default" if empty
db_name = group[0] if group else "default"
# If db_name is not empty (including "default", "test_db", etc.), keep it as-is
# Set secure=True if https scheme
if parsed_uri.scheme == "https":
kwargs["secure"] = True
connect_milvus(**kwargs, user=user, password=password, token=token, db_name=db_name)
return
# 2nd Priority, connection configs from env
if self._env_uri is not None:
addr, parsed_uri = self._env_uri
kwargs["address"] = addr
user = parsed_uri.username if parsed_uri.username is not None else ""
password = parsed_uri.password if parsed_uri.password is not None else ""
# Set secure=True if https scheme
if parsed_uri.scheme == "https":
kwargs["secure"] = True
connect_milvus(**kwargs, user=user, password=password, db_name=db_name)
return
# 3rd Priority, connect to cached configs with provided user and password
if alias in self._alias_config:
connect_alias = dict(self._alias_config[alias].items())
connect_alias["user"] = user
connect_milvus(**connect_alias, password=password, db_name=db_name, **kwargs)
return
# No params, env, and cached configs for the alias
raise ConnectionConfigException(message=ExceptionsMessage.ConnLackConf % alias)
def list_connections(self) -> list:
"""List names of all connections.
:return list:
Names of all connections.
:example:
>>> from pymilvus import connections
>>> connections.connect("test", host="localhost", port="19530")
>>> connections.list_connections()
"""
return [(k, self._alias_handlers.get(k, None)) for k in self._alias_config]
def get_connection_addr(self, alias: str):
"""
Retrieves connection configure by alias.
:param alias: The name of milvus connection
:type alias: str
:return dict:
The connection configure which of the name is alias.
If alias does not exist, return empty dict.
:example:
>>> from pymilvus import connections
>>> connections.connect("test", host="localhost", port="19530")
>>> connections.list_connections()
>>> connections.get_connection_addr('test')
{'host': 'localhost', 'port': '19530'}
"""
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
return self._alias_config.get(alias, {})
def has_connection(self, alias: str) -> bool:
"""Check if connection named alias exists.
:param alias: The name of milvus connection
:type alias: str
:return bool:
if the connection of name alias exists.
:example:
>>> from pymilvus import connections
>>> connections.connect("test", host="localhost", port="19530")
>>> connections.list_connections()
>>> connections.get_connection_addr('test')
{'host': 'localhost', 'port': '19530'}
"""
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
return alias in self._alias_handlers
def _fetch_handler(
self, alias: str = Config.MILVUS_CONN_ALIAS
) -> Union[GrpcHandler, AsyncGrpcHandler]:
"""Retrieves a GrpcHandler by alias."""
if not isinstance(alias, str):
raise ConnectionConfigException(message=ExceptionsMessage.AliasType % type(alias))
conn = self._alias_handlers.get(alias, None)
if conn is None:
raise ConnectionNotExistException(message=ExceptionsMessage.ConnectFirst)
return conn
# Singleton Mode in Python
connections = Connections()
| Connections |
python | numba__numba | numba/cuda/deviceufunc.py | {
"start": 20852,
"end": 24584
} | class ____(object):
def __init__(self, kernelmap, engine):
self.kernelmap = kernelmap
self.engine = engine
self.max_blocksize = 2 ** 30
def __call__(self, *args, **kws):
callsteps = self._call_steps(self.engine.nin, self.engine.nout,
args, kws)
indtypes, schedule, outdtypes, kernel = self._schedule(
callsteps.inputs, callsteps.outputs)
callsteps.adjust_input_types(indtypes)
outputs = callsteps.prepare_outputs(schedule, outdtypes)
inputs = callsteps.prepare_inputs()
parameters = self._broadcast(schedule, inputs, outputs)
callsteps.launch_kernel(kernel, schedule.loopn, parameters)
return callsteps.post_process_outputs(outputs)
def _schedule(self, inputs, outs):
input_shapes = [a.shape for a in inputs]
schedule = self.engine.schedule(input_shapes)
# find kernel
indtypes = tuple(i.dtype for i in inputs)
try:
outdtypes, kernel = self.kernelmap[indtypes]
except KeyError:
# No exact match, then use the first compatible.
# This does not match the numpy dispatching exactly.
# Later, we may just jit a new version for the missing signature.
indtypes = self._search_matching_signature(indtypes)
# Select kernel
outdtypes, kernel = self.kernelmap[indtypes]
# check output
for sched_shape, out in zip(schedule.output_shapes, outs):
if out is not None and sched_shape != out.shape:
raise ValueError('output shape mismatch')
return indtypes, schedule, outdtypes, kernel
def _search_matching_signature(self, idtypes):
"""
Given the input types in `idtypes`, return a compatible sequence of
types that is defined in `kernelmap`.
Note: Ordering is guaranteed by `kernelmap` being a OrderedDict
"""
for sig in self.kernelmap.keys():
if all(np.can_cast(actual, desired)
for actual, desired in zip(sig, idtypes)):
return sig
else:
raise TypeError("no matching signature")
def _broadcast(self, schedule, params, retvals):
assert schedule.loopn > 0, "zero looping dimension"
odim = 1 if not schedule.loopdims else schedule.loopn
newparams = []
for p, cs in zip(params, schedule.ishapes):
if not cs and p.size == 1:
# Broadcast scalar input
devary = self._broadcast_scalar_input(p, odim)
newparams.append(devary)
else:
# Broadcast vector input
newparams.append(self._broadcast_array(p, odim, cs))
newretvals = []
for retval, oshape in zip(retvals, schedule.oshapes):
newretvals.append(retval.reshape(odim, *oshape))
return tuple(newparams) + tuple(newretvals)
def _broadcast_array(self, ary, newdim, innerdim):
newshape = (newdim,) + innerdim
# No change in shape
if ary.shape == newshape:
return ary
# Creating new dimension
elif len(ary.shape) < len(newshape):
assert newshape[-len(ary.shape):] == ary.shape, \
"cannot add dim and reshape at the same time"
return self._broadcast_add_axis(ary, newshape)
# Collapsing dimension
else:
return ary.reshape(*newshape)
def _broadcast_add_axis(self, ary, newshape):
raise NotImplementedError("cannot add new axis")
def _broadcast_scalar_input(self, ary, shape):
raise NotImplementedError
| GeneralizedUFunc |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/automation_condition_scenario.py | {
"start": 1173,
"end": 1558
} | class ____(dg.AutomationCondition):
"""Always returns the empty subset."""
label: Optional[str] = None
@property
def description(self) -> str:
return ""
def evaluate(self, context: AutomationContext) -> dg.AutomationResult:
return dg.AutomationResult(context, true_subset=context.get_empty_subset())
@dataclass(frozen=True)
| FalseAutomationCondition |
python | jina-ai__jina | tests/integration/issues/github_2103/test_search_attributes.py | {
"start": 914,
"end": 1718
} | class ____(Executor):
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['tag'] = 'test'
def test_no_matches_rest(query_dict):
port = helper.random_port()
with Flow(
protocol='http',
port=port,
).add(uses=MockExecutor):
# temporarily adding sleep
time.sleep(0.5)
query = json.dumps(query_dict).encode('utf-8')
req = request.Request(
f'http://localhost:{port}/search',
data=query,
headers={'content-type': 'application/json'},
)
resp = request.urlopen(req).read().decode('utf8')
doc = json.loads(resp)['data'][0]
assert len(Document.from_dict(doc).matches) == 0
assert Document.from_dict(doc).tags['tag'] == 'test'
| MockExecutor |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_exclude_lists_audit.py | {
"start": 5375,
"end": 9340
} | class ____:
"""Test suite for _audit_exclude_missing_export function."""
def test_audit_exclude_missing_export_all_valid(self):
"""Test audit when all EXCLUDE_MISSING_EXPORT entries are still valid."""
# Mock validator with symbols that are not exported
mock_symbols = [
PublicSymbol(
module_path="test.module",
symbol_name="one",
symbol_type="function",
is_exported=False,
source_file="/path/to/test/module.py",
),
PublicSymbol(
module_path="test.module",
symbol_name="two",
symbol_type="class",
is_exported=False,
source_file="/path/to/test/module.py",
),
]
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = mock_symbols
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MISSING_EXPORT",
{"test.module.one", "test.module.two"},
):
with patch("automation.dagster_docs.commands.check.click.echo") as mock_echo:
result = _audit_exclude_missing_export(mock_validator)
assert result == 0
mock_echo.assert_called_with(
"✓ All entries in EXCLUDE_MISSING_EXPORT are still valid (symbols still not exported at top-level)"
)
def test_audit_exclude_missing_export_some_exported(self):
"""Test audit when some EXCLUDE_MISSING_EXPORT entries are now exported."""
# Mock validator with some symbols that are exported
mock_symbols = [
PublicSymbol(
module_path="test.module",
symbol_name="one",
symbol_type="function",
is_exported=True,
source_file="/path/to/test/module.py",
),
PublicSymbol(
module_path="test.module",
symbol_name="two",
symbol_type="class",
is_exported=False,
source_file="/path/to/test/module.py",
),
PublicSymbol(
module_path="test.module",
symbol_name="three",
symbol_type="function",
is_exported=True,
source_file="/path/to/test/module.py",
),
]
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = mock_symbols
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MISSING_EXPORT",
{"test.module.one", "test.module.two", "test.module.three"},
):
with patch("automation.dagster_docs.commands.check.click.echo") as mock_echo:
result = _audit_exclude_missing_export(mock_validator)
assert result == 1
# Should report the symbols that can be removed (the exported ones)
call_args = [str(call.args[0]) for call in mock_echo.call_args_list if call.args]
output_text = "\n".join(call_args)
assert "test.module.one" in output_text
assert "test.module.three" in output_text
assert "test.module.two" not in output_text # This one is not exported
def test_audit_exclude_missing_export_empty_exclude_list(self):
"""Test audit when EXCLUDE_MISSING_EXPORT is empty."""
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = []
with patch("automation.dagster_docs.commands.check.EXCLUDE_MISSING_EXPORT", set()):
with patch("automation.dagster_docs.commands.check.click.echo") as mock_echo:
result = _audit_exclude_missing_export(mock_validator)
assert result == 0
mock_echo.assert_called_with(
"✓ All entries in EXCLUDE_MISSING_EXPORT are still valid (symbols still not exported at top-level)"
)
| TestAuditExcludeMissingExport |
python | walkccc__LeetCode | solutions/1547. Minimum Cost to Cut a Stick/1547.py | {
"start": 0,
"end": 298
} | class ____:
def minCost(self, n: int, cuts: list[int]) -> int:
A = sorted([0] + cuts + [n])
@functools.lru_cache(None)
def dp(i, j):
if j - i <= 1:
return 0
return min(A[j] - A[i] + dp(i, k) + dp(k, j) for k in range(i + 1, j))
return dp(0, len(A) - 1)
| Solution |
python | PyCQA__pylint | tests/functional/p/protected_access_access_different_scopes.py | {
"start": 60,
"end": 235
} | class ____:
async def method(self):
pass
def function():
assert self.attr # [undefined-variable]
def func():
self.attr += 2 # [undefined-variable]
| MyClass |
python | coleifer__peewee | peewee.py | {
"start": 216301,
"end": 216604
} | class ____(Metadata):
models = []
def __init__(self, model, *args, **kwargs):
super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs)
self.models.append(model)
def map_models(self, fn):
for model in self.models:
fn(model)
| SubclassAwareMetadata |
python | doocs__leetcode | solution/2700-2799/2750.Ways to Split Array Into Good Subarrays/Solution.py | {
"start": 0,
"end": 334
} | class ____:
def numberOfGoodSubarraySplits(self, nums: List[int]) -> int:
mod = 10**9 + 7
ans, j = 1, -1
for i, x in enumerate(nums):
if x == 0:
continue
if j > -1:
ans = ans * (i - j) % mod
j = i
return 0 if j == -1 else ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.