language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
django__django
|
tests/template_tests/filter_tests/test_dictsort.py
|
{
"start": 309,
"end": 4167
}
|
class ____(SimpleTestCase):
    """Tests for the ``dictsort`` template filter and its ``_property_resolver``
    lookup helper."""

    def test_property_resolver(self):
        # Valid dotted/index lookups resolve through nested dicts, object
        # attributes/properties, string keys, and list indices.
        user = User()
        dict_data = {
            "a": {
                "b1": {"c": "result1"},
                "b2": user,
                "b3": {"0": "result2"},
                "b4": [0, 1, 2],
            }
        }
        list_data = ["a", "b", "c"]
        tests = [
            ("a.b1.c", dict_data, "result1"),
            ("a.b2.password", dict_data, "abc"),
            ("a.b2.test_property", dict_data, "cde"),
            # The method should not get called.
            ("a.b2.test_method", dict_data, user.test_method),
            ("a.b3.0", dict_data, "result2"),
            (0, list_data, "a"),
        ]
        for arg, data, expected_value in tests:
            with self.subTest(arg=arg):
                self.assertEqual(_property_resolver(arg)(data), expected_value)
        # Invalid lookups.
        fail_tests = [
            ("a.b1.d", dict_data, AttributeError),
            ("a.b2.password.0", dict_data, AttributeError),
            ("a.b2._private", dict_data, AttributeError),
            ("a.b4.0", dict_data, AttributeError),
            ("a", list_data, AttributeError),
            ("0", list_data, TypeError),
            (4, list_data, IndexError),
        ]
        for arg, data, expected_exception in fail_tests:
            with self.subTest(arg=arg):
                with self.assertRaises(expected_exception):
                    _property_resolver(arg)(data)

    def test_sort(self):
        sorted_dicts = dictsort(
            [
                {"age": 23, "name": "Barbara-Ann"},
                {"age": 63, "name": "Ra Ra Rasputin"},
                {"name": "Jonny B Goode", "age": 18},
            ],
            "age",
        )
        # Use `d`, not `dict`, as the loop variable to avoid shadowing the
        # builtin `dict`.
        self.assertEqual(
            [sorted(d.items()) for d in sorted_dicts],
            [
                [("age", 18), ("name", "Jonny B Goode")],
                [("age", 23), ("name", "Barbara-Ann")],
                [("age", 63), ("name", "Ra Ra Rasputin")],
            ],
        )

    def test_dictsort_complex_sorting_key(self):
        """
        Since dictsort uses dict.get()/getattr() under the hood, it can sort
        on keys like 'foo.bar'.
        """
        data = [
            {"foo": {"bar": 1, "baz": "c"}},
            {"foo": {"bar": 2, "baz": "b"}},
            {"foo": {"bar": 3, "baz": "a"}},
        ]
        sorted_data = dictsort(data, "foo.baz")
        self.assertEqual([d["foo"]["bar"] for d in sorted_data], [3, 2, 1])

    def test_sort_list_of_tuples(self):
        # Integer keys index into tuple elements.
        data = [("a", "42"), ("c", "string"), ("b", "foo")]
        expected = [("a", "42"), ("b", "foo"), ("c", "string")]
        self.assertEqual(dictsort(data, 0), expected)

    def test_sort_list_of_tuple_like_dicts(self):
        # String digit keys do dict lookups, not positional indexing.
        data = [
            {"0": "a", "1": "42"},
            {"0": "c", "1": "string"},
            {"0": "b", "1": "foo"},
        ]
        expected = [
            {"0": "a", "1": "42"},
            {"0": "b", "1": "foo"},
            {"0": "c", "1": "string"},
        ]
        self.assertEqual(dictsort(data, "0"), expected)

    def test_invalid_values(self):
        """
        If dictsort is passed something other than a list of dictionaries,
        fail silently.
        """
        self.assertEqual(dictsort([1, 2, 3], "age"), "")
        self.assertEqual(dictsort("Hello!", "age"), "")
        self.assertEqual(dictsort({"a": 1}, "age"), "")
        self.assertEqual(dictsort(1, "age"), "")

    def test_invalid_args(self):
        """Fail silently if invalid lookups are passed."""
        self.assertEqual(dictsort([{}], "._private"), "")
        self.assertEqual(dictsort([{"_private": "test"}], "_private"), "")
        self.assertEqual(
            dictsort([{"nested": {"_private": "test"}}], "nested._private"), ""
        )
|
FunctionTests
|
python
|
getsentry__sentry
|
tests/sentry/issues/endpoints/test_group_details.py
|
{
"start": 11910,
"end": 29988
}
|
class ____(APITestCase):
    """Tests for PUT ``/api/0/issues/{id}/``: status changes (resolve,
    resolvedInNextRelease, ignore), bookmarking, assignment, seen-state,
    subscriptions, discarding, and rate limiting."""

    def test_resolve(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"status": "resolved"}, format="json")
        assert response.status_code == 200, response.content
        group = Group.objects.get(id=group.id, project=group.project.id)
        assert group.status == GroupStatus.RESOLVED
        # Resolving subscribes the acting user to the group.
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()

    def test_resolved_in_next_release_non_semver(self) -> None:
        self.login_as(user=self.user)
        project = self.create_project_with_releases()
        group = self.create_group_with_no_release(project)
        # Two releases are created, the most recent one will be used for group resolution
        Release.get_or_create(version="abcd", project=group.project)
        most_recent_version = Release.get_or_create(version="def", project=group.project)
        assert group.status == GroupStatus.UNRESOLVED
        assert GroupResolution.objects.all().count() == 0
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"status": "resolvedInNextRelease"})
        assert response.status_code == 200, response.content
        # Refetch from DB to ensure the latest state is fetched
        group = Group.objects.get(id=group.id, project=group.project.id)
        assert group.status == GroupStatus.RESOLVED
        group_resolution = GroupResolution.objects.filter(group=group).first()
        assert group_resolution is not None
        assert group_resolution.group == group
        # Non-semver projects resolve "in next release" as a pending resolution
        # pinned to the most recently created release.
        assert group_resolution.type == GroupResolution.Type.in_next_release
        assert group_resolution.status == GroupResolution.Status.pending
        assert group_resolution.release.version == most_recent_version.version

    def create_project_with_releases(self) -> Project:
        """Create a project whose ``has_releases`` flag is set."""
        project = self.create_project()
        project.flags.has_releases = True
        project.save()
        return project

    def create_group_with_no_release(self, project: Project) -> Group:
        """Create a group via event ingestion rather than ``create_group()``."""
        # Using store_event() instead of create_group() produces GroupRelease objects
        # which is considered during the update_groups() call
        event = self.store_event(data={}, project_id=project.id)
        group = event.group
        assert group is not None
        return group

    def resolved_in_next_release_helper(self, with_first_release: bool = True) -> None:
        """Shared driver for the semver resolvedInNextRelease tests, with and
        without a first release on the group."""
        self.login_as(user=self.user)
        project = self.create_project_with_releases()
        releases = [
            Release.get_or_create(version="com.foo.bar@1.0+0", project=project),
            Release.get_or_create(version="com.foo.bar@2.0+0", project=project),
            # Created last, but not the greatest semver version, so it must not
            # be picked for resolution.
            Release.get_or_create(version="com.foo.bar@1.0+1", project=project),
        ]
        first_release = releases[0]
        greatest_version = releases[1]
        data = {}
        if with_first_release:
            data["release"] = first_release.version
        # Using store_event() instead of create_group() produces GroupRelease objects
        # which is considered during the update_groups() call
        event = self.store_event(data=data, project_id=project.id)
        group = event.group
        assert group is not None
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.NEW
        if with_first_release:
            assert group.first_release == first_release
        else:
            assert group.first_release is None
        assert GroupResolution.objects.all().count() == 0
        url = f"/api/0/issues/{group.id}/"
        data = {"status": "resolvedInNextRelease"}
        response = self.client.put(url, data=data)
        # Fix: the assertion message used to be `response.content == {}`, a
        # boolean expression that hid the response body on failure.
        assert response.status_code == 200, response.content
        # Refetch from DB to ensure the latest state is fetched
        group = Group.objects.get(id=group.id, project=project.id)
        assert group.status == GroupStatus.RESOLVED
        group_resolution = GroupResolution.objects.filter(group=group).first()
        assert group_resolution is not None
        assert group_resolution.group == group
        # For semver projects, we consider resolution based on an expression rather than a specific release,
        # thus, it is considered resolved in the release that has the highest semver
        assert group_resolution.type == GroupResolution.Type.in_release
        assert group_resolution.status == GroupResolution.Status.resolved
        assert group_resolution.release.version == greatest_version.version
        assert response.data["statusDetails"]["inRelease"] == greatest_version.version
        new_release = Release.get_or_create(version="com.foo.bar@3.0+0", project=project)
        # A lesser release than 2.x but created more recently than 3.x
        old_version = Release.get_or_create(version="com.foo.bar@1.1+0", project=project)
        # Let's test that none of these releases regress the group
        for release in releases + [old_version]:
            event = self.store_event(data={"release": release.version}, project_id=project.id)
            assert event.group == group
            # Refetch from DB to ensure the latest state is fetched
            group = Group.objects.get(id=group.id, project=project.id)
            assert group.status == GroupStatus.RESOLVED
        # Let's test that the latest semver release regress the group
        event = self.store_event(data={"release": new_release.version}, project_id=project.id)
        group = Group.objects.get(id=group.id, project=project.id)
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.REGRESSED

    def test_resolved_in_next_release_semver_no_first_release(self) -> None:
        self.resolved_in_next_release_helper(with_first_release=False)

    def test_resolved_in_next_release_semver_and_first_release(self) -> None:
        self.resolved_in_next_release_helper(with_first_release=True)

    def test_resolved_in_next_release_no_release(self) -> None:
        self.login_as(user=self.user)
        project = self.create_project_with_releases()
        group = self.create_group_with_no_release(project)
        url = f"/api/0/organizations/{group.organization.slug}/issues/{group.id}/"
        response = self.client.put(url, data={"status": "resolvedInNextRelease"})
        assert response.status_code == 200, response.content
        # Refetch from DB to ensure the latest state is fetched
        group = Group.objects.get(id=group.id, project=group.project.id)
        assert group.status == GroupStatus.RESOLVED
        # no GroupResolution because there is no release
        assert not GroupResolution.objects.filter(group=group).exists()
        assert response.data["statusDetails"] == {}

    def test_snooze_duration(self) -> None:
        group = self.create_group(status=GroupStatus.RESOLVED)
        self.login_as(user=self.user)
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(
            url, data={"status": "ignored", "ignoreDuration": 30}, format="json"
        )
        assert response.status_code == 200
        snooze = GroupSnooze.objects.get(group=group)
        assert snooze.until is not None
        # Snooze expiry lands 30 minutes out (with a tolerance for test runtime).
        assert snooze.until > timezone.now() + timedelta(minutes=29)
        assert snooze.until < timezone.now() + timedelta(minutes=31)
        assert response.data["statusDetails"]["ignoreUntil"] == snooze.until
        group = Group.objects.get(id=group.id)
        assert group.get_status() == GroupStatus.IGNORED
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()

    def test_bookmark(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"isBookmarked": "1"}, format="json")
        assert response.status_code == 200, response.content
        # ensure we've created the bookmark
        assert GroupBookmark.objects.filter(group=group, user_id=self.user.id).exists()
        # Bookmarking also subscribes the user.
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()

    def test_assign_username(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"assignedTo": self.user.username}, format="json")
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()
        assert (
            Activity.objects.filter(
                group=group, user_id=self.user.id, type=ActivityType.ASSIGNED.value
            ).count()
            == 1
        )
        # An empty PUT leaves the assignment untouched.
        response = self.client.put(url, format="json")
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()
        # An explicit empty assignee clears the assignment.
        response = self.client.put(url, data={"assignedTo": ""}, format="json")
        assert response.status_code == 200, response.content
        assert not GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()

    def test_assign_id(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"assignedTo": self.user.id}, format="json")
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()
        assert (
            Activity.objects.filter(
                group=group, user_id=self.user.id, type=ActivityType.ASSIGNED.value
            ).count()
            == 1
        )
        # An empty PUT leaves the assignment untouched.
        response = self.client.put(url, format="json")
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()
        # An explicit empty assignee clears the assignment.
        response = self.client.put(url, data={"assignedTo": ""}, format="json")
        assert response.status_code == 200, response.content
        assert not GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()

    def test_assign_id_via_api_key(self) -> None:
        # XXX: This test is written to verify that using api keys works when
        # hitting an endpoint that uses `client.{get,put,post}` to redirect to
        # another endpoint. This catches a regression that happened when
        # migrating to DRF 3.x.
        with assume_test_silo_mode(SiloMode.CONTROL):
            api_key = ApiKey.objects.create(
                organization_id=self.organization.id, scope_list=["event:write"]
            )
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(
            url,
            data={"assignedTo": self.user.id},
            format="json",
            HTTP_AUTHORIZATION=self.create_basic_auth_header(api_key.key),
        )
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, user_id=self.user.id).exists()

    def test_assign_team(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        team = self.create_team(organization=group.project.organization, members=[self.user])
        group.project.add_team(team)
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"assignedTo": f"team:{team.id}"}, format="json")
        assert response.status_code == 200, response.content
        assert GroupAssignee.objects.filter(group=group, team=team).exists()
        assert Activity.objects.filter(group=group, type=ActivityType.ASSIGNED.value).count() == 1
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()
        response = self.client.put(url, data={"assignedTo": ""}, format="json")
        assert response.status_code == 200, response.content
        # Un-assignment adds a second activity entry and removes the assignee.
        assert Activity.objects.filter(group=group).count() == 2
        assert not GroupAssignee.objects.filter(group=group, team=team).exists()

    def test_assign_unavailable_team(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        # Team is not added to the project, so the assignment must be rejected.
        team = self.create_team(organization=group.project.organization, members=[self.user])
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"assignedTo": f"team:{team.id}"}, format="json")
        assert response.status_code == 400, response.content

    def test_mark_seen(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"hasSeen": "1"}, format="json")
        assert response.status_code == 200, response.content
        assert GroupSeen.objects.filter(group=group, user_id=self.user.id).exists()
        response = self.client.put(url, data={"hasSeen": "0"}, format="json")
        assert response.status_code == 200, response.content
        assert not GroupSeen.objects.filter(group=group, user_id=self.user.id).exists()

    def test_mark_seen_as_non_member(self) -> None:
        # A superuser who is not a project member should not create a GroupSeen row.
        user = self.create_user("foo@example.com", is_superuser=True)
        self.login_as(user=user, superuser=True)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(url, data={"hasSeen": "1"}, format="json")
        assert response.status_code == 200, response.content
        assert not GroupSeen.objects.filter(group=group, user_id=self.user.id).exists()

    def test_seen_by_deleted_user(self) -> None:
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        self.login_as(user=self.user)
        # Create a stale GroupSeen referencing a user that no longer exists
        GroupSeen.objects.create(group=group, user_id=424242, project_id=self.project.id)
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        # Assert empty set for single invalid GroupSeen
        assert response.data["seenBy"] == []
        has_seen_response = self.client.put(url, data={"hasSeen": "1"}, format="json")
        assert has_seen_response.status_code == 200
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        # Assert only valid GroupSeens are serialized
        last_seen_data = response.data["seenBy"]
        assert len(last_seen_data) == 1
        assert last_seen_data[0]["id"] == str(self.user.id)

    def test_subscription(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        resp = self.client.put(url, data={"isSubscribed": "true"})
        assert resp.status_code == 200, resp.content
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=True
        ).exists()
        # Unsubscribing keeps the row but flips is_active to False.
        resp = self.client.put(url, data={"isSubscribed": "false"})
        assert resp.status_code == 200, resp.content
        assert GroupSubscription.objects.filter(
            user_id=self.user.id, group=group, is_active=False
        ).exists()

    def test_discard(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        group_hash = GroupHash.objects.create(hash="x" * 32, project=group.project, group=group)
        url = f"/api/0/issues/{group.id}/"
        with self.tasks():
            with self.feature("projects:discard-groups"):
                resp = self.client.put(url, data={"discard": True})
        assert resp.status_code == 204
        # Discarding deletes the group but keeps the hash, now pointing at a tombstone.
        assert not Group.objects.filter(id=group.id).exists()
        assert GroupHash.objects.filter(id=group_hash.id).exists()
        tombstone = GroupTombstone.objects.get(
            id=GroupHash.objects.get(id=group_hash.id).group_tombstone_id
        )
        assert tombstone.message == group.message
        assert tombstone.culprit == group.culprit
        assert tombstone.project == group.project
        assert tombstone.data == group.data

    def test_discard_performance_issue(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group(type=PerformanceSlowDBQueryGroupType.type_id)
        GroupHash.objects.create(hash="x" * 32, project=group.project, group=group)
        url = f"/api/0/issues/{group.id}/"
        # Discarding performance issues is not supported.
        with self.tasks():
            with self.feature("projects:discard-groups"):
                response = self.client.put(url, data={"discard": True})
        assert response.status_code == 400, response.content
        # Ensure it's still there
        assert Group.objects.filter(id=group.id).exists()
        assert GroupHash.objects.filter(group_id=group.id).exists()

    @override_settings(SENTRY_SELF_HOSTED=False)
    def test_ratelimit(self) -> None:
        self.login_as(user=self.user)
        group = self.create_group()
        url = f"/api/0/issues/{group.id}/"
        with freeze_time("2000-01-01"):
            # Exhaust the window, then expect a 429 on the next request.
            for i in range(10):
                self.client.put(url, sort_by="date", limit=1)
            response = self.client.put(url, sort_by="date", limit=1)
            assert response.status_code == 429
|
GroupUpdateTest
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 16512,
"end": 16667
}
|
class ____(sgqlc.types.Enum):
    """GraphQL enum of OIDC identity-provider types.

    Auto-generated sgqlc binding; the schema defines a single choice,
    ``AAD`` (presumably Azure Active Directory — confirm against the
    upstream GraphQL schema).
    """
    # Bind this type to the generated GraphQL schema object.
    __schema__ = graphql_schema
    # The enum values accepted/returned for this type.
    __choices__ = ("AAD",)
|
OIDCProviderType
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_numeric.py
|
{
"start": 94417,
"end": 97120
}
|
class ____(TestCase):
    """Tests for np.rollaxis: invalid-argument handling and result shapes
    for every (axis, start) combination on a (1, 2, 3, 4) array."""

    # expected shape indexed by (axis, start) for array of
    # shape (1, 2, 3, 4)
    tgtshape = {
        (0, 0): (1, 2, 3, 4),
        (0, 1): (1, 2, 3, 4),
        (0, 2): (2, 1, 3, 4),
        (0, 3): (2, 3, 1, 4),
        (0, 4): (2, 3, 4, 1),
        (1, 0): (2, 1, 3, 4),
        (1, 1): (1, 2, 3, 4),
        (1, 2): (1, 2, 3, 4),
        (1, 3): (1, 3, 2, 4),
        (1, 4): (1, 3, 4, 2),
        (2, 0): (3, 1, 2, 4),
        (2, 1): (1, 3, 2, 4),
        (2, 2): (1, 2, 3, 4),
        (2, 3): (1, 2, 3, 4),
        (2, 4): (1, 2, 4, 3),
        (3, 0): (4, 1, 2, 3),
        (3, 1): (1, 4, 2, 3),
        (3, 2): (1, 2, 4, 3),
        (3, 3): (1, 2, 3, 4),
        (3, 4): (1, 2, 3, 4),
    }
    def test_exceptions(self):
        # Out-of-range `axis` or `start` must raise AxisError (valid axis is
        # [-4, 3] and valid start is [-4, 4] for a 4-D array).
        a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4)
        assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
        assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
        assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
        assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
    @xfail  # XXX: ndarray.attributes
    def test_results(self):
        # For every (axis, start) key in tgtshape, check all four sign
        # combinations of axis/start produce the expected shape, preserve the
        # element mapping, and return a view (OWNDATA is False).
        a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy()
        aind = np.indices(a.shape)
        assert_(a.flags["OWNDATA"])
        for i, j in self.tgtshape:
            # positive axis, positive start
            res = np.rollaxis(a, axis=i, start=j)
            # NOTE(review): this indexing appears to recover the original
            # element positions from the rolled result — confirm against the
            # upstream numpy test this was ported from.
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
            assert_(not res.flags["OWNDATA"])
            # negative axis, positive start
            ip = i + 1
            res = np.rollaxis(a, axis=-ip, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, j)])
            assert_(not res.flags["OWNDATA"])
            # positive axis, negative start
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=i, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
            assert_(not res.flags["OWNDATA"])
            # negative axis, negative start
            ip = i + 1
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=-ip, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
            assert_(not res.flags["OWNDATA"])
|
TestRollaxis
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
|
{
"start": 4154,
"end": 14522
}
|
class ____(nn.Module):
    """Rotary position embedding (RoPE) for the GLM-4V MoE text model.

    Inverse frequencies are computed once at construction (via the default
    routine below, or via ``ROPE_INIT_FUNCTIONS`` for non-default RoPE
    types) and ``forward`` turns position ids into cos/sin tables.
    """

    inv_freq: torch.Tensor  # annotation so linters accept the `register_buffer` attribute

    def __init__(self, config: Glm4vMoeTextConfig, device=None, layer_type=None):
        super().__init__()
        # Both lengths are kept so dynamic RoPE variants can grow and later
        # restore the cached sequence length.
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn: Callable = self.compute_default_rope_parameters
        if self.rope_type != "default":
            # Non-default RoPE types are looked up in the shared registry.
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        # Non-persistent: derivable from config, so it is not checkpointed.
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = inv_freq
    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[Glm4vMoeTextConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation.

        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        # When partial_rotary_factor < 1, only a leading fraction of the head
        # dimension is rotated.
        dim = int(head_dim * partial_rotary_factor)
        attention_factor = 1.0  # Unused in this type of RoPE
        # Compute the inverse frequencies: 1 / base^(2i/dim) for i in [0, dim/2)
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor
    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # In contrast to other models, GLM4V_MOE uses different position ids
        # per grid axis, so inv_freq is expanded to a leading dimension of 3.
        inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
        position_ids_expanded = position_ids[:, :, None, :].float()  # shape (3, bs, 1, positions)
        # autocast is unsupported/undesired on mps, hence the "cpu" fallback.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Duplicate each key/value head ``n_rep`` times along the head axis.

    Equivalent to ``torch.repeat_interleave(hidden_states, n_rep, dim=1)``:
    an input of shape (batch, num_key_value_heads, seq_len, head_dim) becomes
    (batch, num_key_value_heads * n_rep, seq_len, head_dim).
    """
    if n_rep == 1:
        # Nothing to duplicate; return the input unchanged.
        return hidden_states
    bsz, n_kv_heads, seq_len, hd = hidden_states.shape
    # Insert a broadcast axis, expand it to n_rep copies (a view, no copy),
    # then fold it into the head dimension with a reshape.
    expanded = hidden_states[:, :, None, :, :].expand(bsz, n_kv_heads, n_rep, seq_len, hd)
    return expanded.reshape(bsz, n_kv_heads * n_rep, seq_len, hd)
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Plain (eager) scaled-dot-product attention.

    Returns ``(attn_output, attn_weights)`` where ``attn_output`` has shape
    (batch, seq_len, num_heads, head_dim) after the final transpose and
    ``attn_weights`` are the post-dropout attention probabilities.
    """
    # Broadcast the grouped KV heads so they line up with the query heads.
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)
    scores = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Slice the mask to the key length and add it as an additive bias.
        scores = scores + attention_mask[:, :, :, : key_states.shape[-2]]
    # Softmax in float32 for numerical stability, then cast back.
    attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states).transpose(1, 2).contiguous()
    return attn_output, attn_weights
def rotate_half(x):
    """Rotate the last dimension by half: map ``[x1, x2]`` to ``[-x2, x1]``.

    ``x1`` and ``x2`` are the first and second halves of the final axis.
    """
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply interleaved rotary position embeddings to query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): Cosine table of the rotary embedding.
        sin (`torch.Tensor`): Sine table of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*): Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos`/`sin` are unsqueezed so they broadcast
            against `q`/`k`: use 1 for [batch, heads, seq, dim] layouts and 2
            for [batch, seq, heads, dim] layouts.

    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    # Interleaved layout: keep the first half of each table and duplicate
    # every entry pairwise instead of concatenating two identical halves.
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
    rotary_dim = cos.shape[-1]
    # Only the leading `rotary_dim` channels are rotated; the rest pass through.
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
    q_embed = torch.cat([q_rot * cos + rotate_half(q_rot) * sin, q_pass], dim=-1)
    k_embed = torch.cat([k_rot * cos + rotate_half(k_rot) * sin, k_pass], dim=-1)
    return q_embed, k_embed
def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
    """Apply multimodal (3D) rotary position embeddings to query/key tensors
    (https://qwenlm.github.io/blog/qwen2-vl/).

    The channel dimension is split into sections for the temporal, height and
    width rotary components; each section takes its cos/sin values from the
    corresponding axis of the 3D position-id tables. For text tokens all three
    axes carry identical positions, so the result matches ordinary 1D RoPE.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): Cosine table of the rotary embedding.
        sin (`torch.Tensor`): Sine table of the rotary embedding.
        mrope_section (`List(int)`):
            Channel-section sizes for the temporal, height and width components.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos`/`sin` are unsqueezed so they broadcast
            against `q`/`k` (1 for [batch, heads, seq, dim], 2 for
            [batch, seq, heads, dim]).

    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    # Each t/h/w section appears twice across the channel dimension.
    sections = mrope_section * 2
    # From each chunk pick the axis selected by i mod 3 (t, h, w, t, h, w, ...)
    # and stitch the channels back together.
    cos_chunks = cos.split(sections, dim=-1)
    sin_chunks = sin.split(sections, dim=-1)
    cos = torch.cat([chunk[i % 3] for i, chunk in enumerate(cos_chunks)], dim=-1).unsqueeze(unsqueeze_dim)
    sin = torch.cat([chunk[i % 3] for i, chunk in enumerate(sin_chunks)], dim=-1).unsqueeze(unsqueeze_dim)
    rotary_dim = cos.shape[-1]
    # Only the leading `rotary_dim` channels are rotated; the rest pass through.
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
    q_embed = torch.cat([q_rot * cos + rotate_half(q_rot) * sin, q_pass], dim=-1)
    k_embed = torch.cat([k_rot * cos + rotate_half(k_rot) * sin, k_pass], dim=-1)
    return q_embed, k_embed
|
Glm4vMoeTextRotaryEmbedding
|
python
|
scipy__scipy
|
scipy/sparse/_dia.py
|
{
"start": 19308,
"end": 21466
}
|
class ____(_dia_base, sparray):
    """
    Sparse array with DIAgonal storage.

    This can be instantiated in several ways:
        dia_array(D)
            where D is a 2-D ndarray

        dia_array(S)
            with another sparse array or matrix S (equivalent to S.todia())

        dia_array((M, N), [dtype])
            to construct an empty array with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_array((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        DIA format data array of the array
    offsets
        DIA format offset array of the array
    T

    Notes
    -----
    Sparse arrays can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Sparse arrays with DIAgonal storage do not support slicing.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import dia_array
    >>> dia_array((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_array((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_array((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """
|
dia_array
|
python
|
walkccc__LeetCode
|
solutions/1325. Delete Leaves With a Given Value/1325.py
|
{
"start": 0,
"end": 440
}
|
class ____:
def removeLeafNodes(
self,
root: TreeNode | None,
target: int,
) -> TreeNode | None:
if not root:
return None
root.left = self.removeLeafNodes(root.left, target)
root.right = self.removeLeafNodes(root.right, target)
return None if self._isLeaf(root) and root.val == target else root
def _isLeaf(self, root: TreeNode | None) -> bool:
return not root.left and not root.right
|
Solution
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/linalg/test_linalg.py
|
{
"start": 77369,
"end": 78144
}
|
class ____(TestCase):
@parametrize(
"a, axes",
[
(np.ones((4, 6, 8, 2)), None),
(np.ones((3, 3, 2)), (0, 2)),
],
)
def test_non_square_handling(self, a, axes):
with assert_raises((LinAlgError, RuntimeError)):
b = np.ones(a.shape[:2])
linalg.tensorsolve(a, b, axes=axes)
@skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on CI with numpy 1.21.2")
@parametrize(
"shape",
[(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)],
)
def test_tensorsolve_result(self, shape):
a = np.random.randn(*shape)
b = np.ones(a.shape[:2])
x = np.linalg.tensorsolve(a, b)
assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)
|
TestTensorsolve
|
python
|
crytic__slither
|
slither/detectors/variables/could_be_immutable.py
|
{
"start": 370,
"end": 2037
}
|
class ____(AbstractDetector):
"""
State variables that could be declared immutable.
# Immutable attribute available in Solidity 0.6.5 and above
# https://blog.soliditylang.org/2020/04/06/solidity-0.6.5-release-announcement/
"""
# VULNERABLE_SOLC_VERSIONS =
ARGUMENT = "immutable-states"
HELP = "State variables that could be declared immutable"
IMPACT = DetectorClassification.OPTIMIZATION
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#state-variables-that-could-be-declared-immutable"
WIKI_TITLE = "State variables that could be declared immutable"
WIKI_DESCRIPTION = "State variables that are not updated following deployment should be declared immutable to save gas."
WIKI_RECOMMENDATION = "Add the `immutable` attribute to state variables that never change or are set only in the constructor."
def _detect(self) -> List[Output]:
"""Detect state variables that could be immutable"""
results = {}
unchanged_state_variables = UnchangedStateVariables(self.compilation_unit)
unchanged_state_variables.detect()
for variable in unchanged_state_variables.immutable_candidates:
results[variable.canonical_name] = self.generate_result(
[variable, " should be immutable \n"]
)
# Order by canonical name for deterministic results
return [results[k] for k in sorted(results)]
@staticmethod
def _format(compilation_unit: SlitherCompilationUnit, result: Dict) -> None:
custom_format(compilation_unit, result, "immutable")
|
CouldBeImmutable
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/dtypes.py
|
{
"start": 18492,
"end": 26288
}
|
class ____(_BaseDtype):
"""
Type to represent a struct data.
Parameters
----------
fields : dict
A mapping of field names to dtypes, the dtypes can themselves
be of ``StructDtype`` too.
Attributes
----------
fields
itemsize
Methods
-------
from_arrow
to_arrow
Examples
--------
>>> import cudf
>>> struct_dtype = cudf.StructDtype({"a": "int64", "b": "string"})
>>> struct_dtype
StructDtype({'a': dtype('int64'), 'b': dtype('O')})
A nested ``StructDtype`` can also be constructed in the following way:
>>> nested_struct_dtype = cudf.StructDtype({"dict_data": struct_dtype, "c": "uint8"})
>>> nested_struct_dtype
StructDtype({'dict_data': StructDtype({'a': dtype('int64'), 'b': dtype('O')}), 'c': dtype('uint8')})
"""
name = "struct"
def __init__(self, fields: dict[str, Dtype]) -> None:
with cudf.option_context("mode.pandas_compatible", False):
# We need to temporarily disable pandas compatibility mode
# because `cudf.dtype("object")` raises an error.
self._fields = {k: cudf.dtype(v) for k, v in fields.items()}
@property
def fields(self) -> dict[str, DtypeObj]:
"""
Returns an ordered dict of column name and dtype key-value.
Examples
--------
>>> import cudf
>>> struct_dtype = cudf.StructDtype({"a": "int64", "b": "string"})
>>> struct_dtype
StructDtype({'a': dtype('int64'), 'b': dtype('O')})
>>> struct_dtype.fields
{'a': dtype('int64'), 'b': dtype('O')}
"""
return self._fields
@property
def type(self):
# TODO: we should change this to return something like a
# StructDtypeType, once we figure out what that should look like
return dict
@classmethod
def from_arrow(cls, typ: pa.StructType) -> Self:
"""
Convert a ``pyarrow.StructType`` to ``StructDtype``.
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> pa_struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()})
>>> pa_struct_type
StructType(struct<x: int32, y: string>)
>>> cudf.StructDtype.from_arrow(pa_struct_type)
StructDtype({'x': dtype('int32'), 'y': dtype('O')})
"""
return cls(
{
typ.field(i).name: cudf_dtype_from_pa_type(typ.field(i).type)
for i in range(typ.num_fields)
}
# Once pyarrow 18 is the min version, replace with this version
# {
# field.name: cudf_dtype_from_pa_type(field.type)
# for field in typ.fields
# }
)
def to_arrow(self) -> pa.StructType:
"""
Convert a ``StructDtype`` to a ``pyarrow.StructType``.
Examples
--------
>>> import cudf
>>> struct_type = cudf.StructDtype({"x": "int32", "y": "string"})
>>> struct_type
StructDtype({'x': dtype('int32'), 'y': dtype('O')})
>>> struct_type.to_arrow()
StructType(struct<x: int32, y: string>)
"""
return pa.struct(
# dict[str, DataType] should be compatible but pyarrow stubs are too strict
{ # type: ignore[arg-type]
k: cudf_dtype_to_pa_type(dtype)
for k, dtype in self.fields.items()
}
)
def __eq__(self, other) -> bool:
if isinstance(other, str):
return other == self.name
if not isinstance(other, StructDtype):
return False
return self.to_arrow().equals(other.to_arrow())
def __repr__(self) -> str:
return f"{type(self).__name__}({self.fields})"
def __hash__(self) -> int:
return hash(self.to_arrow())
def serialize(self) -> tuple[dict, list]:
header: dict[str, Any] = {}
frames: list[Buffer] = []
fields: dict[str, str | tuple[Any, tuple[int, int]]] = {}
for k, dtype in self.fields.items():
if isinstance(dtype, _BaseDtype):
dtype_header, dtype_frames = dtype.device_serialize()
fields[k] = (
dtype_header,
(len(frames), len(frames) + len(dtype_frames)),
)
frames.extend(dtype_frames)
else:
fields[k] = dtype.str
header["fields"] = fields
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list) -> Self:
_check_type(cls, header, frames)
fields = {}
for k, dtype in header["fields"].items():
if isinstance(dtype, tuple):
dtype_header, (start, stop) = dtype
fields[k] = Serializable.device_deserialize(
dtype_header,
frames[start:stop],
)
else:
fields[k] = np.dtype(dtype)
return cls(fields)
@cached_property
def itemsize(self) -> int:
return sum(field.itemsize for field in self.fields.values())
def _recursively_replace_fields(self, result: dict) -> dict:
"""
Return a new dict result but with the keys replaced by the keys in self.fields.keys().
Intended when result comes from pylibcudf without preserved nested field names.
"""
new_result = {}
for (new_field, field_dtype), result_value in zip(
self.fields.items(), result.values(), strict=True
):
if isinstance(field_dtype, StructDtype) and isinstance(
result_value, dict
):
new_result[new_field] = (
field_dtype._recursively_replace_fields(result_value)
)
else:
new_result[new_field] = result_value
return new_result
@classmethod
def from_struct_dtype(cls, obj) -> Self:
if isinstance(obj, StructDtype):
return obj
elif isinstance(obj, pa.StructType):
return cls.from_arrow(obj)
elif isinstance(obj, pd.ArrowDtype):
return cls.from_arrow(obj.pyarrow_dtype)
else:
raise TypeError(f"Cannot convert {type(obj)} to StructDtype")
decimal_dtype_template = textwrap.dedent(
"""
Type to represent a ``decimal{size}`` data.
Parameters
----------
precision : int
The total number of digits in each value of this dtype
scale : int, optional
The scale of the dtype. See Notes below.
Attributes
----------
precision
scale
itemsize
Methods
-------
to_arrow
from_arrow
Notes
-----
When the scale is positive:
- numbers with fractional parts (e.g., 0.0042) can be represented
- the scale is the total number of digits to the right of the
decimal point
When the scale is negative:
- only multiples of powers of 10 (including 10**0) can be
represented (e.g., 1729, 4200, 1000000)
- the scale represents the number of trailing zeros in the value.
For example, 42 is representable with precision=2 and scale=0.
13.0051 is representable with precision=6 and scale=4,
and *not* representable with precision<6 or scale<4.
Examples
--------
>>> import cudf
>>> decimal{size}_dtype = cudf.Decimal{size}Dtype(precision=9, scale=2)
>>> decimal{size}_dtype
Decimal{size}Dtype(precision=9, scale=2)
"""
)
|
StructDtype
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_interpolate.py
|
{
"start": 47061,
"end": 52478
}
|
class ____:
# test basic functionality for PPoly and BPoly
def test_sort_check(self, xp):
c = xp.asarray([[1, 4], [2, 5], [3, 6]])
x = xp.asarray([0, 1, 0.5])
assert_raises(ValueError, PPoly, c, x)
assert_raises(ValueError, BPoly, c, x)
def test_ctor_c(self):
# wrong shape: `c` must be at least 2D
with assert_raises(ValueError):
PPoly([1, 2], [0, 1])
def test_extend(self, xp):
# Test adding new points to the piecewise polynomial
np.random.seed(1234)
order = 3
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
c, x = xp.asarray(c), xp.asarray(x)
for cls in (PPoly, BPoly):
pp = cls(c[:, :9, ...], x[:10])
pp.extend(c[:, 9:, ...], x[10:])
pp2 = cls(c[:, 10:, ...], x[10:])
pp2.extend(c[:, :10, ...], x[:10])
pp3 = cls(c, x)
xp_assert_equal(pp.c, pp3.c)
xp_assert_equal(pp.x, pp3.x)
xp_assert_equal(pp2.c, pp3.c)
xp_assert_equal(pp2.x, pp3.x)
def test_extend_diff_orders(self, xp):
# Test extending polynomial with different order one
np.random.seed(1234)
x = xp.linspace(0, 1, 6)
c = xp.asarray(np.random.rand(2, 5))
x2 = xp.linspace(1, 2, 6)
c2 = xp.asarray(np.random.rand(4, 5))
for cls in (PPoly, BPoly):
pp1 = cls(c, x)
pp2 = cls(c2, x2)
pp_comb = cls(c, x)
pp_comb.extend(c2, x2[1:])
# NB. doesn't match to pp1 at the endpoint, because pp1 is not
# continuous with pp2 as we took random coefs.
xi1 = xp.linspace(0, 1, 300, endpoint=False)
xi2 = xp.linspace(1, 2, 300)
xp_assert_close(pp1(xi1), pp_comb(xi1))
xp_assert_close(pp2(xi2), pp_comb(xi2))
def test_extend_descending(self, xp):
np.random.seed(0)
order = 3
x = np.sort(np.random.uniform(0, 10, 20))
c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
c, x = xp.asarray(c), xp.asarray(x)
for cls in (PPoly, BPoly):
p = cls(c, x)
p1 = cls(c[:, :9, ...], x[:10])
p1.extend(c[:, 9:, ...], x[10:])
p2 = cls(c[:, 10:, ...], x[10:])
p2.extend(c[:, :10, ...], x[:10])
xp_assert_equal(p1.c, p.c)
xp_assert_equal(p1.x, p.x)
xp_assert_equal(p2.c, p.c)
xp_assert_equal(p2.x, p.x)
def test_shape(self):
np.random.seed(1234)
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xp = np.random.rand(3, 4)
for cls in (PPoly, BPoly):
p = cls(c, x)
assert p(xp).shape == (3, 4, 5, 6, 7)
# 'scalars'
for cls in (PPoly, BPoly):
p = cls(c[..., 0, 0, 0], x)
assert np.shape(p(0.5)) == ()
assert np.shape(p(np.array(0.5))) == ()
assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]], dtype=object))
def test_concurrency(self, xp):
# Check that no segfaults appear with concurrent access to BPoly, PPoly
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xpp = np.random.rand(3, 4)
c, x, xpp = map(xp.asarray, (c, x, xpp))
for cls in (PPoly, BPoly):
interp = cls(c, x)
def worker_fn(_, interp, xpp):
interp(xpp)
_run_concurrent_barrier(10, worker_fn, interp, xpp)
def test_complex_coef(self):
np.random.seed(12345)
x = np.sort(np.random.random(13))
c = np.random.random((8, 12)) * (1. + 0.3j)
c_re, c_im = c.real, c.imag
xp = np.random.random(5)
for cls in (PPoly, BPoly):
p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
for nu in [0, 1, 2]:
xp_assert_close(p(xp, nu).real, p_re(xp, nu))
xp_assert_close(p(xp, nu).imag, p_im(xp, nu))
def test_axis(self, xp):
np.random.seed(12345)
c = np.random.rand(3, 4, 5, 6, 7, 8)
c_s = c.shape
xpp = np.random.random((1, 2))
c, xpp = xp.asarray(c), xp.asarray(xpp)
for axis in (0, 1, 2, 3):
m = c.shape[axis+1]
x = xp.asarray(np.sort(np.random.rand(m+1)))
for cls in (PPoly, BPoly):
p = cls(c, x, axis=axis)
assert p.c.shape == c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:]
res = p(xpp)
targ_shape = c_s[:axis] + xpp.shape + c_s[2+axis:]
assert res.shape == targ_shape
# deriv/antideriv does not drop the axis
for p1 in [cls(c, x, axis=axis).derivative(),
cls(c, x, axis=axis).derivative(2),
cls(c, x, axis=axis).antiderivative(),
cls(c, x, axis=axis).antiderivative(2)]:
assert p1.axis == p.axis
# c array needs two axes for the coefficients and intervals, so
# 0 <= axis < c.ndim-1; raise otherwise
for axis in (-1, 4, 5, 6):
for cls in (BPoly, PPoly):
assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
|
TestPPolyCommon
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/metaclass3.py
|
{
"start": 149,
"end": 190
}
|
class ____(metaclass=Meta1):
pass
|
Base1
|
python
|
davidhalter__jedi
|
test/completion/decorators.py
|
{
"start": 1152,
"end": 1504
}
|
class ____(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(1, *args, **kwargs)
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
#? str()
nothing("")[1]
@same_func
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
|
Decorator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/check-if-a-string-can-break-another-string.py
|
{
"start": 1001,
"end": 1323
}
|
class ____(object):
def checkIfCanBreak(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
s1, s2 = sorted(s1), sorted(s2)
return all(a >= b for a, b in itertools.izip(s1, s2)) or \
all(a <= b for a, b in itertools.izip(s1, s2))
|
Solution3
|
python
|
encode__django-rest-framework
|
tests/test_lazy_hyperlinks.py
|
{
"start": 290,
"end": 469
}
|
class ____(models.Model):
text = models.CharField(max_length=100)
def __str__(self):
global str_called
str_called = True
return 'An example'
|
Example
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/patches.py
|
{
"start": 156093,
"end": 166013
}
|
class ____(FancyArrowPatch):
"""A patch that connects two points (possibly in different Axes)."""
def __str__(self):
return "ConnectionPatch((%g, %g), (%g, %g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@_docstring.interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None, *,
axesA=None, axesB=None,
arrowstyle="-",
connectionstyle="arc3",
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*.
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for `matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
==================== ==================================================
Property Description
==================== ==================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0, 0 is lower left of figure and 1, 1 is upper
right
'subfigure points' points from the lower left corner of the subfigure
'subfigure pixels' pixels from the lower left corner of the subfigure
'subfigure fraction' fraction of the subfigure, 0, 0 is lower left.
'axes points' points from lower left corner of the Axes
'axes pixels' pixels from lower left corner of the Axes
'axes fraction' 0, 0 is lower left of Axes and 1, 1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you are
using a polar Axes, you do not need to specify
polar for the coordinate system since that is the
native "data" coordinate system.
==================== ==================================================
Alternatively they can be set to any valid
`~matplotlib.transforms.Transform`.
Note that 'subfigure pixels' and 'figure pixels' are the same
for the parent figure, so users who want code that is usable in
a subfigure can use 'subfigure pixels'.
.. note::
Using `ConnectionPatch` across two `~.axes.Axes` instances
is not directly compatible with :ref:`constrained layout
<constrainedlayout_guide>`. Add the artist
directly to the `.Figure` instead of adding it to a specific Axes,
or exclude it from the layout using ``con.set_in_layout(False)``.
.. code-block:: default
fig, ax = plt.subplots(1, 2, constrained_layout=True)
con = ConnectionPatch(..., axesA=ax[0], axesB=ax[1])
fig.add_artist(con)
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
super().__init__(posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
connectionstyle=connectionstyle,
patchA=patchA, patchB=patchB,
shrinkA=shrinkA, shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
**kwargs)
# if True, draw annotation only if self.xy is inside the Axes
self._annotation_clip = None
def _get_xy(self, xy, s, axes=None):
"""Calculate the pixel position of given point."""
s0 = s # For the error message, if needed.
if axes is None:
axes = self.axes
# preserve mixed type input (such as str, int)
x = np.array(xy[0])
y = np.array(xy[1])
fig = self.get_figure(root=False)
if s in ["figure points", "axes points"]:
x = x * fig.dpi / 72
y = y * fig.dpi / 72
s = s.replace("points", "pixels")
elif s == "figure fraction":
s = fig.transFigure
elif s == "subfigure fraction":
s = fig.transSubfigure
elif s == "axes fraction":
s = axes.transAxes
if s == 'data':
trans = axes.transData
x = cbook._to_unmasked_float_array(axes.xaxis.convert_units(x))
y = cbook._to_unmasked_float_array(axes.yaxis.convert_units(y))
return trans.transform((x, y))
elif s == 'offset points':
if self.xycoords == 'offset points': # prevent recursion
return self._get_xy(self.xy, 'data')
return (
self._get_xy(self.xy, self.xycoords) # converted data point
+ xy * self.get_figure(root=True).dpi / 72) # converted offset
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform((x, y))
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
bb = self.get_figure(root=False).figbbox
x = bb.x0 + x if x >= 0 else bb.x1 + x
y = bb.y0 + y if y >= 0 else bb.y1 + y
return x, y
elif s == 'subfigure pixels':
# pixels from the lower left corner of the figure
bb = self.get_figure(root=False).bbox
x = bb.x0 + x if x >= 0 else bb.x1 + x
y = bb.y0 + y if y >= 0 else bb.y1 + y
return x, y
elif s == 'axes pixels':
# pixels from the lower left corner of the Axes
bb = axes.bbox
x = bb.x0 + x if x >= 0 else bb.x1 + x
y = bb.y0 + y if y >= 0 else bb.y1 + y
return x, y
elif isinstance(s, transforms.Transform):
return s.transform(xy)
else:
raise ValueError(f"{s0} is not a valid coordinate transformation")
def set_annotation_clip(self, b):
"""
Set the annotation's clipping behavior.
Parameters
----------
b : bool or None
- True: The annotation will be clipped when ``self.xy`` is
outside the Axes.
- False: The annotation will always be drawn.
- None: The annotation will be clipped when ``self.xy`` is
outside the Axes and ``self.xycoords == "data"``.
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return the clipping behavior.
See `.set_annotation_clip` for the meaning of the return value.
"""
return self._annotation_clip
def _get_path_in_displaycoord(self):
"""Return the mutated path of the arrow in display coordinates."""
dpi_cor = self._dpi_cor
posA = self._get_xy(self.xy1, self.coords1, self.axesA)
posB = self._get_xy(self.xy2, self.coords2, self.axesB)
path = self.get_connectionstyle()(
posA, posB,
patchA=self.patchA, patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor,
)
path, fillable = self.get_arrowstyle()(
path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return path, fillable
def _check_xy(self, renderer):
"""Check whether the annotation needs to be drawn."""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
xy_pixel = self._get_xy(self.xy1, self.coords1, self.axesA)
if self.axesA is None:
axes = self.axes
else:
axes = self.axesA
if not axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
xy_pixel = self._get_xy(self.xy2, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
if not self.get_visible() or not self._check_xy(renderer):
return
super().draw(renderer)
|
ConnectionPatch
|
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http2.py
|
{
"start": 7158,
"end": 7252
}
|
class ____(H2DownloadHandlerMixin, TestHttpsCustomCiphersBase):
pass
|
TestHttps2CustomCiphers
|
python
|
Pylons__pyramid
|
docs/quick_tutorial/routing/tutorial/views.py
|
{
"start": 105,
"end": 454
}
|
class ____:
def __init__(self, request):
self.request = request
@view_config(route_name='home')
def home(self):
first = self.request.matchdict['first']
last = self.request.matchdict['last']
return {
'name': 'Home View',
'first': first,
'last': last
}
|
TutorialViews
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 6084,
"end": 8098
}
|
class ____:
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == self.ucs_value * self.ulen)
# Encode to UTF-8 and double check
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value * self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
assert_(buffer_length(ua_scalar) == 2 * 2 * self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
assert_(buffer_length(ua_scalar) == 2 * self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
ua = np.zeros((), dtype=f'U{self.ulen}')
ua[()] = self.ucs_value * self.ulen
self.content_check(ua, ua[()], 4 * self.ulen)
def test_valuesSD(self):
# Check assignment of single-dimensional objects with values
ua = np.zeros((2,), dtype=f'U{self.ulen}')
ua[0] = self.ucs_value * self.ulen
self.content_check(ua, ua[0], 4 * self.ulen * 2)
ua[1] = self.ucs_value * self.ulen
self.content_check(ua, ua[1], 4 * self.ulen * 2)
def test_valuesMD(self):
# Check assignment of multi-dimensional objects with values
ua = np.zeros((2, 3, 4), dtype=f'U{self.ulen}')
ua[0, 0, 0] = self.ucs_value * self.ulen
self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4)
ua[-1, -1, -1] = self.ucs_value * self.ulen
self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4)
|
AssignValues
|
python
|
huggingface__transformers
|
src/transformers/models/whisper/tokenization_whisper.py
|
{
"start": 3883,
"end": 59613
}
|
class ____(TokenizersBackend):
"""
Construct a "fast" Whisper tokenizer (backed by HuggingFace's *tokenizers* library).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
normalizer_file (`str`, *optional*):
Path to the normalizer_file file.
tokenizer_file (`str`, *optional*):
Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
contains everything needed to load the tokenizer.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token. The `decoder_start_token_id` is used to set the first token as
`"<|startoftranscript|>"` when generating.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (Whisper tokenizer detect beginning of words by the preceding space).
language (`str`, *optional*):
The language of the transcription text. The corresponding language id token is appended to the start of the
sequence for multilingual speech recognition and speech translation tasks, e.g. for Spanish the token
`"<|es|>"` is appended to the start of sequence. This should be used for multilingual fine-tuning only.
task (`str`, *optional*):
Task identifier to append at the start of sequence (if any). This should be used for mulitlingual
fine-tuning, with `"transcribe"` for speech recognition and `"translate"` for speech translation.
predict_timestamps (`bool`, *optional*, defaults to `False`):
Whether to omit the `<|notimestamps|>` token at the start of the sequence.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab=None,
merges=None,
normalizer_file=None,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
add_prefix_space=False,
language=None,
task=None,
predict_timestamps=False,
**kwargs,
):
bos_token = (
AddedToken(bos_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(bos_token, str)
else bos_token
)
eos_token = (
AddedToken(eos_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(eos_token, str)
else eos_token
)
unk_token = (
AddedToken(unk_token, lstrip=False, rstrip=False, normalized=False, special=True)
if isinstance(unk_token, str)
else unk_token
)
self._vocab = vocab if vocab is not None else {}
self._merges = merges if merges is not None else []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
super().__init__(
tokenizer_object=self._tokenizer,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_prefix_space=add_prefix_space,
normalizer_file=normalizer_file,
language=language,
task=task,
predict_timestamps=predict_timestamps,
**kwargs,
)
if normalizer_file is not None:
with open(normalizer_file, encoding="utf-8") as vocab_handle:
self.english_spelling_normalizer = json.load(vocab_handle)
else:
self.english_spelling_normalizer = None
self.timestamp_pat = re.compile(r"<\|(\d+\.\d+)\|>")
self.language = language
self.task = task
self.predict_timestamps = predict_timestamps
self._post_init()
def _post_init(self):
"""Post-initialization hook to set up prefix tokens after the tokenizer is fully loaded."""
super()._post_init()
# Set up prefix tokens if language or task is specified (may be set from config in from_pretrained)
if hasattr(self, "language") and hasattr(self, "task") and hasattr(self, "predict_timestamps"):
if self.language is not None or self.task is not None:
self.set_prefix_tokens(
language=self.language, task=self.task, predict_timestamps=self.predict_timestamps
)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._decode_with_timestamps
def _decode_with_timestamps(
self, token_ids, skip_special_tokens=False, time_precision=0.02, segment_size=1500
) -> str:
"""
Timestamp tokens are above the special tokens' id range and are ignored by `decode()`. This method decodes
given tokens with timestamps tokens annotated, e.g. "<|1.08|>".
"""
timestamp_begin = self.all_special_ids[-1] + 1
outputs = [[]]
cur_max_timestamp = 0.0
prev_segments_len = 0.0
penultimate_timestamp = 0.0
for i, token in enumerate(token_ids):
if token >= timestamp_begin:
timestamp = float((token - timestamp_begin) * time_precision)
if timestamp < cur_max_timestamp:
# next segment has started
last_was_single_ending = i >= 2 and not (
token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin
)
if last_was_single_ending:
prev_segments_len += time_precision * segment_size
else:
cur_max_timestamp = penultimate_timestamp
prev_segments_len += penultimate_timestamp
outputs = outputs[:-2]
penultimate_timestamp = cur_max_timestamp
cur_max_timestamp = timestamp
outputs.append(f"<|{(timestamp + prev_segments_len):.2f}|>")
outputs.append([])
else:
outputs[-1].append(token)
# Decode token sequences outside list comprehension to avoid super() resolution issues
decoded_outputs = []
for s in outputs:
if isinstance(s, str):
decoded_outputs.append(s)
elif s:
decoded_outputs.append(super().decode(s, skip_special_tokens=skip_special_tokens))
else:
decoded_outputs.append("")
return "".join(decoded_outputs)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._compute_offsets
def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500):
"""
Compute offsets for a given tokenized input
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
time_precision (`float`, *optional*, defaults to 0.02):
The time ratio to convert from token to time.
segment_size (`int`, *optional*, defaults to 1500):
The number of features in the input mel spectrogram.
"""
offsets = []
# ensure torch tensor of token ids is placed on cpu
if "torch" in str(type(token_ids)) and (hasattr(token_ids, "cpu") and callable(token_ids.cpu)):
token_ids = token_ids.cpu()
token_ids = np.array(token_ids)
if token_ids.shape[0] > 1 and len(token_ids.shape) > 1:
raise ValueError("Can only process a single input at a time")
timestamp_begin = self.all_special_ids[-1] + 1
timestamp_tokens = token_ids >= timestamp_begin
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1:
# either there are no timestamps or there are no consecutive ones
return []
elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive:
# we add the final timestamp if it is not already in the list
consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1)
last_slice = np.where(timestamp_tokens)[0][0]
cur_max_timestamp = 0
prev_segments_len = 0
for current_slice in consecutive:
sliced_tokens = token_ids[last_slice:current_slice]
if len(sliced_tokens) > 1:
start_timestamp_position = sliced_tokens[0].item() - timestamp_begin
end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin
if start_timestamp_position < cur_max_timestamp:
# next segment has started
is_single_ending = last_slice >= 2 and not (
token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin
)
if is_single_ending:
prev_segments_len += segment_size
else:
prev_segments_len += cur_max_timestamp
cur_max_timestamp = end_timestamp_position
# strip timestamp tokens from the text output
sliced_tokens = self._preprocess_token_ids(sliced_tokens)
text = self._decode(sliced_tokens)
text = self._filter_timestamp_ids(text)
offsets.append(
{
"text": text,
"timestamp": (
start_timestamp_position * time_precision + prev_segments_len * time_precision,
end_timestamp_position * time_precision + prev_segments_len * time_precision,
),
}
)
last_slice = current_slice
return offsets
@lru_cache
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.timestamp_ids
def timestamp_ids(self, time_precision=0.02):
    """
    Compute the timestamp token ids for a given precision and save to least-recently used (LRU) cache.

    Args:
        time_precision (`float`, *optional*, defaults to 0.02):
            The time ratio to convert from token to time.
    """
    # 1501 markers: <|0.00|> through <|30.00|> at `time_precision` steps.
    timestamp_tokens = [f"<|{step * time_precision:.2f}|>" for step in range(1500 + 1)]
    return self.convert_tokens_to_ids(timestamp_tokens)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._preprocess_token_ids
def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool = False):
"""
Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be
removed.
"""
if skip_special_tokens:
prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>")
decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)
return token_ids
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._filter_timestamp_ids
def _filter_timestamp_ids(self, text):
return re.sub(self.timestamp_pat, "", text)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.decode
def decode(
    self,
    token_ids,
    skip_special_tokens: bool = False,
    clean_up_tokenization_spaces: Optional[bool] = None,
    output_offsets: bool = False,
    time_precision: float = 0.02,
    decode_with_timestamps: bool = False,
    normalize: bool = False,
    basic_normalize: bool = False,
    remove_diacritics: bool = False,
    **kwargs,
) -> str:
    """
    Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
    tokens and clean up tokenization spaces.
    Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

    Args:
        token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
            List of tokenized input ids. Can be obtained using the `__call__` method.
        skip_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to remove special tokens in the decoding. Will remove the previous tokens (pre-prompt)
            if present.
        clean_up_tokenization_spaces (`bool`, *optional*):
            Whether or not to clean up the tokenization spaces. If `None`, will default to
            `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
        output_offsets (`bool`, *optional*, defaults to `False`):
            Whether or not to output the offsets of the tokens. This should only be set if the model predicted
            timestamps. If there are previous tokens (pre-prompt) to decode, they will only appear in the decoded
            text if they contain timestamp tokens.
        time_precision (`float`, *optional*, defaults to 0.02):
            The time ratio to convert from token to time.
        decode_with_timestamps (`bool`, *optional*, defaults to `False`):
            Whether or not to decode with timestamps included in the raw text.
        normalize (`bool`, *optional*, defaults to `False`):
            Whether or not to apply the English text normalizer to the decoded text. Only applicable when the
            target text is in English. Otherwise, the basic text normalizer should be applied.
        basic_normalize (`bool`, *optional*, defaults to `False`):
            Whether or not to apply the Basic text normalizer to the decoded text. Applicable to multilingual
            target text.
        remove_diacritics (`bool`, *optional*, defaults to `False`):
            Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may
            destroy information in the decoded text, hence it should be used with caution.
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the underlying model specific decode method.
    Returns:
        `str`: The decoded sentence, or a `{"text": ..., "offsets": ...}` dict when `output_offsets=True`.
    """
    # Optionally strip the prompt (pre-prompt ids before <|startoftranscript|>) first.
    filtered_ids = self._preprocess_token_ids(
        token_ids,
        skip_special_tokens=skip_special_tokens,
    )
    # Delegate the actual id->string conversion to the base tokenizer.
    text = super().decode(
        filtered_ids,
        skip_special_tokens=skip_special_tokens,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        normalize=normalize,
        basic_normalize=basic_normalize,
        remove_diacritics=remove_diacritics,
        **kwargs,
    )
    if decode_with_timestamps:
        # legacy method to decode timestamps when not included in the tokenizer vocabulary
        text = self._decode_with_timestamps(
            filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens
        )
    else:
        # Handle both single string and batch (list of strings) outputs
        if isinstance(text, list):
            text = [self._filter_timestamp_ids(t) for t in text]
        else:
            text = self._filter_timestamp_ids(text)

    # retrieve offsets
    if output_offsets:
        # NOTE(review): offsets are computed from the *unfiltered* token_ids, since
        # _compute_offsets needs the timestamp tokens removed from `filtered_ids`.
        offsets = self._compute_offsets(token_ids, time_precision=time_precision)
        return {"text": text, "offsets": offsets}
    return text
def _decode(
    self, *args, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs
) -> str:
    """Decode via the parent class, then optionally run one of the two text normalizers."""
    decoded = super()._decode(*args, **kwargs)
    # English normalizer takes precedence over the basic (multilingual) one.
    if normalize:
        return self._normalize(decoded)
    if basic_normalize:
        return self._basic_normalize(decoded, remove_diacritics=remove_diacritics)
    return decoded
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._normalize
def _normalize(self, text):
warnings.warn(
"The private method `_normalize` is deprecated and will be removed in v5 of Transformers."
"You can normalize an input string using the Whisper English normalizer using the `normalize` method."
)
return self.normalize(text)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._basic_normalize
def _basic_normalize(self, text, remove_diacritics=False):
warnings.warn(
"The private method `_basic_normalize` is deprecated and will be removed in v5 of Transformers."
"You can normalize an input string using the Whisper basic normalizer using the `basic_normalize` method."
)
return self.basic_normalize(text, remove_diacritics=remove_diacritics)
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.normalize
def normalize(self, text):
    """
    Normalize a given string using the `EnglishTextNormalizer` class, which performs commons transformation on
    english text.
    """
    # A fresh normalizer is built per call from the tokenizer's spelling table.
    return EnglishTextNormalizer(self.english_spelling_normalizer)(text)
@staticmethod
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.basic_normalize
def basic_normalize(text, remove_diacritics=False):
    """
    Normalize a given string using the `BasicTextNormalizer` class, which performs commons transformation on
    multilingual text.
    """
    return BasicTextNormalizer(remove_diacritics=remove_diacritics)(text)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
    """
    Write the vocabulary, merges and (if present) English spelling-normalizer files
    into `save_directory`, optionally prefixing each filename with `filename_prefix`.

    Returns the tuple of written file paths, or `None` if `save_directory` is not a directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    prefix = filename_prefix + "-" if filename_prefix else ""
    vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])
    merge_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["merges_file"])
    normalizer_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["normalizer_file"])

    with open(vocab_file, "w", encoding="utf-8") as vocab_handle:
        vocab_handle.write(json.dumps(self._vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

    with open(merge_file, "w", encoding="utf-8") as merges_handle:
        merges_handle.write("#version: 0.2\n")
        for merge_pair in self._merges:
            merges_handle.write(" ".join(merge_pair) + "\n")

    # The normalizer file is optional: only saved when a spelling table exists.
    if self.english_spelling_normalizer is not None:
        with open(normalizer_file, "w", encoding="utf-8") as normalizer_handle:
            normalizer_handle.write(
                json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
            )
    return (vocab_file, merge_file, normalizer_file)
def set_prefix_tokens(
    self, language: Optional[str] = None, task: Optional[str] = None, predict_timestamps: Optional[bool] = None
):
    """
    Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to
    update the prefix tokens as required when fine-tuning. Example:

    ```python
    >>> # instantiate the tokenizer and set the prefix token to Spanish
    >>> tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny", language="spanish")
    >>> # now switch the prefix token from Spanish to French
    >>> tokenizer.set_prefix_tokens(language="french")
    ```

    Args:
        language (`str`, *optional*, defaults to `None`):
            The language of the transcription text.
        task (`str`, *optional*, defaults to `None`):
            Task identifier to append at the start of sequence (if any).
        predict_timestamps (`bool`, *optional*, defaults to `None`):
            Whether to omit the `<|notimestamps|>` token at the start of the sequence.
    """
    # `None` means "keep the current setting" for each of the three options.
    if language is not None:
        self.language = language
    if task is not None:
        self.task = task
    if predict_timestamps is not None:
        self.predict_timestamps = predict_timestamps

    # Rebuild the backend post-processor so encoding emits the new prefix tokens.
    prefix_token_ids = self.prefix_tokens
    prefixes = self.convert_ids_to_tokens(prefix_token_ids)
    eos = self.eos_token
    eos_token_id = self.eos_token_id
    prefix_template = " ".join(f"{token}:0" for token in prefixes)
    self.backend_tokenizer.post_processor = processors.TemplateProcessing(
        single=f"{prefix_template} $A:0 {eos}:0",
        pair=f"{prefix_template} $A:0 $B:1 {eos}:1",
        special_tokens=[
            (eos, eos_token_id),
            *zip(prefixes, prefix_token_ids),
        ],
    )
@property
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.prefix_tokens
def prefix_tokens(self) -> list[int]:
    """Ids forced at the start of the label sequence: bos, then optionally language, task and <|notimestamps|>."""
    bos_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
    translate_token_id = self.convert_tokens_to_ids("<|translate|>")
    transcribe_token_id = self.convert_tokens_to_ids("<|transcribe|>")
    notimestamps_token_id = self.convert_tokens_to_ids("<|notimestamps|>")
    langs = tuple(LANGUAGES.keys())

    if self.language is not None:
        self.language = self.language.lower()
        # Accept either a full language name or a two-letter language code.
        if self.language in TO_LANGUAGE_CODE:
            language_id = TO_LANGUAGE_CODE[self.language]
        elif self.language in TO_LANGUAGE_CODE.values():
            language_id = self.language
        else:
            is_language_code = len(self.language) == 2
            raise ValueError(
                f"Unsupported language: {self.language}. Language should be one of:"
                f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}."
            )

    if self.task is not None and self.task not in TASK_IDS:
        raise ValueError(f"Unsupported task: {self.task}. Task should be in: {TASK_IDS}")

    bos_sequence = [bos_token_id]
    if self.language is not None:
        # Language token ids follow <|startoftranscript|> in LANGUAGES order.
        bos_sequence.append(bos_token_id + 1 + langs.index(language_id))
    if self.task is not None:
        bos_sequence.append(transcribe_token_id if self.task == "transcribe" else translate_token_id)
    if not self.predict_timestamps:
        bos_sequence.append(notimestamps_token_id)
    return bos_sequence
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
    """Build model inputs from a sequence by appending eos_token_id."""
    sequence = list(self.prefix_tokens) + token_ids_0
    if token_ids_1 is not None:
        # We don't expect to process pairs, but leave the pair logic for API consistency
        sequence = sequence + token_ids_1
    return sequence + [self.eos_token_id]
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_special_tokens_mask
def get_special_tokens_mask(
    self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
    special tokens using the tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`list[int]`):
            List of IDs.
        token_ids_1 (`list[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.
    Returns:
        `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    # Mask layout: prefix tokens (1s), sequence tokens (0s), trailing eos (1).
    mask = [1] * len(self.prefix_tokens)
    mask.extend([0] * len(token_ids_0))
    if token_ids_1 is not None:
        mask.extend([0] * len(token_ids_1))
    mask.append(1)
    return mask
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_decoder_prompt_ids
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
    """Return `(position, token_id)` pairs forcing the lang/task/timestamp prefix during generation."""
    self.set_prefix_tokens(task=task, language=language, predict_timestamps=not no_timestamps)
    # prefix tokens are of the form: <|startoftranscript|> <|lang_id|> <|task|> <|notimestamps|>
    # we don't want to force the bos token at position 1, as this is the starting token
    # when we generate, so we slice the prefix tokens to: <|lang_id|> <|task|> <|notimestamps|>
    # to get the forced tokens
    forced_tokens = self.prefix_tokens[1:]
    return [(position + 1, token) for position, token in enumerate(forced_tokens)]
def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision):
    """Delegate ASR-pipeline decoding to the module-level `_decode_asr` helper.

    Keyword-only options mirror the helper's: `return_timestamps` (falsy, truthy, or
    the string "word"), `return_language`, and `time_precision` (seconds per
    timestamp token).
    """
    return _decode_asr(
        self,
        model_outputs,
        return_timestamps=return_timestamps,
        return_language=return_language,
        time_precision=time_precision,
    )
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_prompt_ids
def get_prompt_ids(self, text: str, return_tensors="np"):
    """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`]."""
    encoding = self("<|startofprev|>", " " + text.strip(), add_special_tokens=False)

    # Reject any special token hidden inside the user-supplied prompt text
    # (everything after the leading <|startofprev|>).
    prompt_text_ids = encoding["input_ids"][1:]
    min_special_id = self.all_special_ids[0]
    disallowed_id = next((token_id for token_id in prompt_text_ids if token_id >= min_special_id), None)
    if disallowed_id is not None:
        token = self.convert_ids_to_tokens(disallowed_id)
        raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.")

    encoding.convert_to_tensors(tensor_type=return_tensors)
    return encoding["input_ids"]
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._strip_prompt
def _strip_prompt(self, token_ids: list[int], prompt_token_id: int, decoder_start_token_id: int):
if not isinstance(token_ids, list):
token_ids = self._convert_to_list(token_ids)
# handle case of empty token_ids for decoding with timestamps.
# at this point token_ids is a list, so it is safe to use if not check.
if not token_ids:
return token_ids
has_prompt = token_ids[0] == prompt_token_id
if has_prompt:
if decoder_start_token_id in token_ids:
return token_ids[token_ids.index(decoder_start_token_id) :]
else:
return []
return token_ids
@staticmethod
# Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._convert_to_list
def _convert_to_list(token_ids):
# convert type to ndarray if necessary
if hasattr(token_ids, "numpy"):
token_ids = token_ids.cpu().numpy()
# now the token ids are either a numpy array, or a list of lists
if isinstance(token_ids, np.ndarray):
token_ids = token_ids.tolist()
return token_ids
# NOTE(review): this function is defined a second time later in the file; the later
# definition shadows this one at import time. Consider deduplicating.
def _combine_tokens_into_words(
    tokenizer,
    tokens: list[int],
    language: Optional[str] = None,
    prepend_punctuations: str = "\"'“¡¿([{-",
    append_punctuations: str = "\"'.。,,!!??::”)]}、",
):
    """
    Groups tokens by word. Returns a tuple containing a list of strings with the words, and a list of `token_id`
    sequences with the tokens making up each word.
    """
    if language is None:
        language = tokenizer.language
    if language is None:
        language = "english"

    # Languages written without spaces are split on unicode boundaries instead.
    spaceless_languages = {"chinese", "japanese", "thai", "lao", "myanmar", "cantonese"}
    splitter = _split_tokens_on_unicode if language in spaceless_languages else _split_tokens_on_spaces
    words, word_tokens, token_indices = splitter(tokenizer, tokens)

    _merge_punctuations(words, word_tokens, token_indices, prepend_punctuations, append_punctuations)
    return words, word_tokens, token_indices
def _find_longest_common_sequence(sequences, token_timestamp_sequences=None):
    """Merge overlapping token `sequences` (from chunked inference) into one sequence.

    For each adjacent pair, slides the right sequence over the left one, scores every
    overlap by match ratio (with a small bonus for longer overlaps), then keeps the
    left half of the best overlap from the left sequence and the right half from the
    right sequence. If `token_timestamp_sequences` is given, it is cut at exactly the
    same points and returned as a second element of a tuple.
    """
    # It would be much harder to do O(n) because of fault tolerance.
    # We actually have a really good property which is that the total sequence
    # MUST be those subsequences in order.
    # If token_timestamp_sequences is provided, will split those sequences in
    # exactly the same way.
    left_sequence = sequences[0]
    left_length = len(left_sequence)
    total_sequence = []

    if token_timestamp_sequences:
        left_token_timestamp_sequence = token_timestamp_sequences[0]
        total_token_timestamp_sequence = []

    for seq_idx, right_sequence in enumerate(sequences[1:]):
        max_ = 0.0
        max_indices = (left_length, left_length, 0, 0)
        # Here we're sliding matches: every relative offset of `right_sequence`
        # against `left_sequence` is scored, including partial overlaps at
        # either end, e.g.
        #   [a, b, c, d]        [a, b, c, d]        [a, b, c, d]
        #            [c, d, f]        [c, d, f]     [c, d, f]
        right_length = len(right_sequence)
        for i in range(1, left_length + right_length):
            # epsilon to favor long perfect matches
            eps = i / 10000.0

            # Slightly convoluted because we don't want out of bound indices
            # This will be necessary for a small conflict resolution optimization
            # later
            left_start = max(0, left_length - i)
            left_stop = min(left_length, left_length + right_length - i)
            left = np.array(left_sequence[left_start:left_stop])

            right_start = max(0, i - left_length)
            right_stop = min(right_length, i)
            right = np.array(right_sequence[right_start:right_stop])

            # We can only match subsequences of the same size.
            if len(left) != len(right):
                raise RuntimeError(
                    "There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference."
                )

            if token_timestamp_sequences:
                # Get length of longest subsequence of tokens that match
                # and have timestamps that are in order
                matches = sum(
                    1
                    for idx, elem in enumerate(left)
                    if (
                        elem == right[idx]
                        and left_token_timestamp_sequence[left_start + idx]
                        <= token_timestamp_sequences[seq_idx + 1][right_start + idx]
                    )
                )
            else:
                matches = np.sum(left == right)

            matching = matches / i + eps
            # Require at least 2 matching tokens to accept an alignment.
            if matches > 1 and matching > max_:
                max_ = matching
                max_indices = (left_start, left_stop, right_start, right_stop)

        (left_start, left_stop, right_start, right_stop) = max_indices

        # This is a small conflict optimization since those sequences overlap
        # in audio.
        # We're going to give more confidence to the left sequence
        # for the left of the overlap,
        # and to the right of the sequence, for the right of the overlap
        left_mid = (left_stop + left_start) // 2
        right_mid = (right_stop + right_start) // 2
        total_sequence.extend(left_sequence[:left_mid])
        left_sequence = right_sequence[right_mid:]
        left_length = len(left_sequence)

        if token_timestamp_sequences:
            total_token_timestamp_sequence.extend(left_token_timestamp_sequence[:left_mid])
            left_token_timestamp_sequence = token_timestamp_sequences[seq_idx + 1][right_mid:]

    total_sequence.extend(left_sequence)

    if token_timestamp_sequences is None:
        return total_sequence

    if len(token_timestamp_sequences) > 0:
        total_token_timestamp_sequence.extend(left_token_timestamp_sequence)
        return total_sequence, total_token_timestamp_sequence
    else:
        return total_sequence, []
def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, time_precision, segment_size=1500):
    """
    Internal method meant to only be used by asr pipeline. Handles all the little quirks specific to whisper to handle
    the various options not allowed in other seq2seq models

    Returns `(full_text, optional)` where `optional` holds `{"chunks": ...}` when
    timestamps or language are requested. `return_timestamps` may be falsy, truthy
    or the string "word" (per-word timestamps via `_collate_word_timestamps`).
    `segment_size` is in timestamp-token units, so one full segment spans
    `segment_size * time_precision` seconds (30s with the defaults).
    """
    # =========== Overview ============
    # - iterate over all outputs
    # - all tokens within output
    # - Each token can be
    #   - language token
    #   - special token
    #   - timestamp token
    #   - text token
    # - We accumulate the text tokens.
    # - We split on end timestamps
    # - Lots of complexity comes from stride and timestamps

    last_language = None

    def new_chunk():
        # Fresh chunk carrying forward the most recently seen language.
        return {"language": last_language, "timestamp": [None, None], "text": ""}

    # Welcome to the state machine !
    chunks = []
    chunk = new_chunk()
    time_offset = 0.0
    timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
    previous_tokens = []
    previous_token_timestamps = []
    skip = False
    right_stride_start = None

    all_special_ids = set(tokenizer.all_special_ids)
    prompt_token_id = tokenizer.convert_tokens_to_ids("<|startofprev|>")
    decoder_start_token_id = tokenizer.convert_tokens_to_ids("<|startoftranscript|>")
    # - iterate over all outputs
    for chunk_id, output in enumerate(model_outputs):
        # We can drop everything to Python list, it's going to make
        # our lives easier
        token_ids = output["tokens"][0].tolist()
        # (possibly) remove the prompt from the token ids
        token_ids = tokenizer._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)
        if return_timestamps == "word":
            token_timestamps = output["token_timestamps"][0].tolist()

        # Those keep track of timestamps within strides
        # Which need to be skipped and resolve all tokens in a single
        # chunk.
        last_timestamp = None
        first_timestamp = timestamp_begin

        # long form generation: we need to handle the case where the call to generate returns concatenated segments,
        # with underlying multiple calls to generate
        cur_max_timestamp = 0.0
        prev_segments_len = 0.0
        penultimate_timestamp = 0.0

        if "stride" in output:
            chunk_len, stride_left, stride_right = output["stride"]
            # Offset the timings to account for the other `model_outputs`.
            time_offset -= stride_left
            right_stride_start = chunk_len - stride_right

            # Keeping track of timestamps within strides
            # We're going to NOT split on those, and delay until we're
            # out of BOTH stride. Otherwise lots of issues occur and
            # corner cases
            if stride_left:
                first_timestamp = stride_left / time_precision + timestamp_begin
            if stride_right:
                for token in reversed(token_ids):
                    if token >= timestamp_begin:
                        # There can be several token in the right stride
                        # But the last one is ALWAYS going to be skipped
                        if (
                            last_timestamp is not None
                            and (token - timestamp_begin) * time_precision < right_stride_start
                        ):
                            break
                        last_timestamp = token

        current_tokens = []
        current_token_timestamps = []

        # - all tokens within output
        for i, token in enumerate(token_ids):
            # 4 possible states for each token
            # - 1/ Language code
            # - 2/ all other special tokens (which we ignore)
            # - 3/ Timestamp
            # - 4/ Regular text
            if token in all_special_ids:
                # Either language code or other
                text = tokenizer.decode([token])
                # Removing outer shell <|XX|>
                text = text[2:-2]
                language = LANGUAGES.get(text)
                if language is not None:
                    # 1/ Indeed some language
                    # TODO Handle when language is different from the previous
                    # one, and we cannot use timestamped tokens to create chunks
                    if last_language and language != last_language and not return_timestamps:
                        previous_tokens.append(current_tokens)
                        resolved_tokens = _find_longest_common_sequence(previous_tokens)
                        resolved_text = tokenizer.decode(resolved_tokens)
                        chunk["text"] = resolved_text
                        chunks.append(chunk)

                        # Flush all our temporary context
                        previous_tokens = []
                        current_tokens = []
                        chunk = new_chunk()
                    chunk["language"] = language
                    last_language = language
                else:
                    # 2/ This is a regular special token, ignoring it
                    pass
            elif token >= timestamp_begin:
                # 3/ Timestamp token
                timestamp = float((token - timestamp_begin) * time_precision)
                if timestamp < cur_max_timestamp:
                    # next segment has started
                    last_was_single_ending = i >= 2 and not (
                        token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin
                    )
                    if last_was_single_ending:
                        # a lone closing timestamp implies a full segment elapsed
                        prev_segments_len += time_precision * segment_size
                    else:
                        cur_max_timestamp = penultimate_timestamp
                        prev_segments_len += penultimate_timestamp

                penultimate_timestamp = cur_max_timestamp
                cur_max_timestamp = timestamp

                time = (token - timestamp_begin) * time_precision + time_offset + prev_segments_len
                time = round(time, 2)
                if last_timestamp and token >= last_timestamp:
                    # Whisper outputted a timestamp token, but it falls within
                    # our stride, so we're going to skip it for the time being
                    # and resolve this later
                    # Skip is necessary because timestamp tokens always come
                    # by pair, so we need to skip the next one too (which would mark the start of another chunk).
                    skip = True
                elif skip or (previous_tokens and token < first_timestamp):
                    skip = False
                elif chunk["timestamp"][0] is None:
                    chunk["timestamp"][0] = time
                else:
                    # This is the end of the timestamp chunk
                    if time == chunk["timestamp"][0]:
                        # This is a bug in timestamp token output
                        # where we're taking the duplicate token
                        # as a stop where it should be a start.
                        # This is an issue in the underlying model output
                        # Let's just skip it so it becomes de-factor
                        # a start again
                        pass
                    else:
                        chunk["timestamp"][1] = time
                        # Handling merges.
                        previous_tokens.append(current_tokens)
                        if return_timestamps == "word":
                            previous_token_timestamps.append(current_token_timestamps)
                        resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence(
                            previous_tokens, previous_token_timestamps
                        )
                        resolved_text = tokenizer.decode(resolved_tokens)
                        chunk["text"] = resolved_text
                        if return_timestamps == "word":
                            chunk["words"] = _collate_word_timestamps(
                                tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language
                            )
                        chunks.append(chunk)

                        # Flush all our temporary context
                        previous_tokens = []
                        current_tokens = []
                        previous_token_timestamps = []
                        current_token_timestamps = []
                        chunk = new_chunk()
            else:
                # 4/ Regular token
                # We just append to the list of all tokens so we can handle
                # merges later and decode into text.
                current_tokens.append(token)
                if return_timestamps == "word":
                    if i == 0:
                        start_time = round(0.0 + time_offset, 2)
                    else:
                        start_time = round(token_timestamps[i - 1] + time_offset, 2)
                    end_time = round(token_timestamps[i] + time_offset, 2)
                    current_token_timestamps.append((start_time, end_time))

        if "stride" in output:
            time_offset += chunk_len - stride_right

        # Leftover tokens
        if current_tokens:
            previous_tokens.append(current_tokens)
            if return_timestamps == "word":
                previous_token_timestamps.append(current_token_timestamps)
        elif not (any(p for p in previous_tokens)):
            chunk = new_chunk()
            previous_tokens = []
            current_tokens = []
            previous_token_timestamps = []
            current_token_timestamps = []

    if previous_tokens:
        if return_timestamps:
            logger.warning(
                "Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. "
                "Also make sure WhisperTimeStampLogitsProcessor was used during generation."
            )
        # Happens when we don't use timestamps
        resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence(
            previous_tokens, previous_token_timestamps
        )
        resolved_text = tokenizer.decode(resolved_tokens)
        chunk["text"] = resolved_text
        if return_timestamps == "word":
            chunk["words"] = _collate_word_timestamps(
                tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language
            )
        chunks.append(chunk)

    # Preparing and cleaning up the pipeline output
    full_text = "".join(chunk["text"] for chunk in chunks)
    if return_timestamps or return_language:
        for chunk in chunks:
            if not return_timestamps:
                chunk.pop("timestamp")
            else:
                chunk["timestamp"] = tuple(chunk["timestamp"])
            if not return_language:
                chunk.pop("language")
        if return_timestamps == "word":
            # Word mode flattens per-chunk word lists into a single list.
            new_chunks = []
            for chunk in chunks:
                new_chunks.extend(chunk["words"])
            optional = {"chunks": new_chunks}
        else:
            optional = {"chunks": chunks}
    else:
        optional = {}
    return full_text, optional
# NOTE(review): verbatim duplicate of the `_find_longest_common_sequence` defined
# earlier in this file; at import time this later definition shadows the earlier
# one. Consider removing one copy.
def _find_longest_common_sequence(sequences, token_timestamp_sequences=None):
    """Merge overlapping token `sequences` (from chunked inference) into one sequence.

    For each adjacent pair, slides the right sequence over the left one, scores every
    overlap by match ratio (with a small bonus for longer overlaps), then keeps the
    left half of the best overlap from the left sequence and the right half from the
    right sequence. If `token_timestamp_sequences` is given, it is cut at exactly the
    same points and returned as a second element of a tuple.
    """
    # It would be much harder to do O(n) because of fault tolerance.
    # We actually have a really good property which is that the total sequence
    # MUST be those subsequences in order.
    # If token_timestamp_sequences is provided, will split those sequences in
    # exactly the same way.
    left_sequence = sequences[0]
    left_length = len(left_sequence)
    total_sequence = []

    if token_timestamp_sequences:
        left_token_timestamp_sequence = token_timestamp_sequences[0]
        total_token_timestamp_sequence = []

    for seq_idx, right_sequence in enumerate(sequences[1:]):
        max_ = 0.0
        max_indices = (left_length, left_length, 0, 0)
        # Here we're sliding matches: every relative offset of `right_sequence`
        # against `left_sequence` is scored, including partial overlaps at
        # either end, e.g.
        #   [a, b, c, d]        [a, b, c, d]        [a, b, c, d]
        #            [c, d, f]        [c, d, f]     [c, d, f]
        right_length = len(right_sequence)
        for i in range(1, left_length + right_length):
            # epsilon to favor long perfect matches
            eps = i / 10000.0

            # Slightly convoluted because we don't want out of bound indices
            # This will be necessary for a small conflict resolution optimization
            # later
            left_start = max(0, left_length - i)
            left_stop = min(left_length, left_length + right_length - i)
            left = np.array(left_sequence[left_start:left_stop])

            right_start = max(0, i - left_length)
            right_stop = min(right_length, i)
            right = np.array(right_sequence[right_start:right_stop])

            # We can only match subsequences of the same size.
            if len(left) != len(right):
                raise RuntimeError(
                    "There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference."
                )

            if token_timestamp_sequences:
                # Get length of longest subsequence of tokens that match
                # and have timestamps that are in order
                matches = sum(
                    1
                    for idx, elem in enumerate(left)
                    if (
                        elem == right[idx]
                        and left_token_timestamp_sequence[left_start + idx]
                        <= token_timestamp_sequences[seq_idx + 1][right_start + idx]
                    )
                )
            else:
                matches = np.sum(left == right)

            matching = matches / i + eps
            # Require at least 2 matching tokens to accept an alignment.
            if matches > 1 and matching > max_:
                max_ = matching
                max_indices = (left_start, left_stop, right_start, right_stop)

        (left_start, left_stop, right_start, right_stop) = max_indices

        # This is a small conflict optimization since those sequences overlap
        # in audio.
        # We're going to give more confidence to the left sequence
        # for the left of the overlap,
        # and to the right of the sequence, for the right of the overlap
        left_mid = (left_stop + left_start) // 2
        right_mid = (right_stop + right_start) // 2
        total_sequence.extend(left_sequence[:left_mid])
        left_sequence = right_sequence[right_mid:]
        left_length = len(left_sequence)

        if token_timestamp_sequences:
            total_token_timestamp_sequence.extend(left_token_timestamp_sequence[:left_mid])
            left_token_timestamp_sequence = token_timestamp_sequences[seq_idx + 1][right_mid:]

    total_sequence.extend(left_sequence)

    if token_timestamp_sequences is None:
        return total_sequence

    if len(token_timestamp_sequences) > 0:
        total_token_timestamp_sequence.extend(left_token_timestamp_sequence)
        return total_sequence, total_token_timestamp_sequence
    else:
        return total_sequence, []
def _collate_word_timestamps(tokenizer, tokens, token_timestamps, language, return_language):
    """Group `tokens` into words and attach each word's (start, end) timestamp span."""
    words, _, token_indices = _combine_tokens_into_words(tokenizer, tokens, language)

    optional_language_field = {"language": language} if return_language else {}

    timings = []
    for word, indices in zip(words, token_indices):
        # A word spans from the start of its first token to the end of its last one.
        span = (token_timestamps[indices[0]][0], token_timestamps[indices[-1]][1])
        timings.append({"text": word, "timestamp": span, **optional_language_field})
    return timings
# NOTE(review): verbatim duplicate of the `_combine_tokens_into_words` defined earlier
# in this file; this later definition shadows the earlier one. Consider deduplicating.
def _combine_tokens_into_words(
    tokenizer,
    tokens: list[int],
    language: Optional[str] = None,
    prepend_punctuations: str = "\"'“¡¿([{-",
    append_punctuations: str = "\"'.。,,!!??::”)]}、",
):
    """
    Groups tokens by word. Returns a tuple containing a list of strings with the words, and a list of `token_id`
    sequences with the tokens making up each word.
    """
    if language is None:
        language = tokenizer.language
    if language is None:
        language = "english"

    # Languages written without spaces are split on unicode boundaries instead.
    spaceless_languages = {"chinese", "japanese", "thai", "lao", "myanmar", "cantonese"}
    splitter = _split_tokens_on_unicode if language in spaceless_languages else _split_tokens_on_spaces
    words, word_tokens, token_indices = splitter(tokenizer, tokens)

    _merge_punctuations(words, word_tokens, token_indices, prepend_punctuations, append_punctuations)
    return words, word_tokens, token_indices
def _split_tokens_on_unicode(tokenizer, tokens: list[int]):
"""Combine tokens into words by splitting at any position where the tokens are decoded as valid unicode points."""
decoded_full = tokenizer.decode(tokens, decode_with_timestamps=True)
replacement_char = "\ufffd"
words = []
word_tokens = []
token_indices = []
current_tokens = []
current_indices = []
unicode_offset = 0
for token_idx, token in enumerate(tokens):
current_tokens.append(token)
current_indices.append(token_idx)
decoded = tokenizer.decode(current_tokens, decode_with_timestamps=True)
if (
replacement_char not in decoded
or decoded_full[unicode_offset + decoded.index(replacement_char)] == replacement_char
):
words.append(decoded)
word_tokens.append(current_tokens)
token_indices.append(current_indices)
current_tokens = []
current_indices = []
unicode_offset += len(decoded)
return words, word_tokens, token_indices
def _split_tokens_on_spaces(tokenizer, tokens: list[int]):
"""Combine tokens into words by splitting at whitespace and punctuation tokens."""
subwords, subword_tokens_list, subword_indices_list = _split_tokens_on_unicode(tokenizer, tokens)
words = []
word_tokens = []
token_indices = []
for subword, subword_tokens, subword_indices in zip(subwords, subword_tokens_list, subword_indices_list):
special = subword_tokens[0] >= tokenizer.eos_token_id
with_space = subword.startswith(" ")
punctuation = subword.strip() in "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
if special or with_space or punctuation or len(words) == 0:
words.append(subword)
word_tokens.append(subword_tokens)
token_indices.append(subword_indices)
else:
words[-1] = words[-1] + subword
word_tokens[-1].extend(subword_tokens)
token_indices[-1].extend(subword_indices)
return words, word_tokens, token_indices
def _merge_punctuations(words, tokens, indices, prepended, appended):
"""Merges punctuation tokens with neighboring words."""
# prepend punctuations
i = len(words) - 2
j = len(words) - 1
while i >= 0:
if words[i].startswith(" ") and words[i].strip() in prepended:
words[j] = words[i] + words[j]
tokens[j] = tokens[i] + tokens[j]
indices[j] = indices[i] + indices[j]
words[i] = ""
tokens[i] = []
indices[i] = []
else:
j = i
i -= 1
# append punctuations
i = 0
j = 1
while j < len(words):
if not words[i].endswith(" ") and words[j] in appended:
words[i] += words[j]
tokens[i] += tokens[j]
indices[i] += indices[j]
words[j] = ""
tokens[j] = []
indices[j] = []
else:
i = j
j += 1
# remove elements that are now empty
words[:] = [word for word in words if word]
tokens[:] = [token for token in tokens if token]
indices[:] = [idx for idx in indices if idx]
__all__ = ["WhisperTokenizer"]
|
WhisperTokenizer
|
python
|
google__pytype
|
pytype/tests/test_pyi2.py
|
{
"start": 109,
"end": 4097
}
|
class ____(test_base.BaseTest):
"""Tests for PYI."""
def test_unnecessary_any_import(self):
ty = self.Infer("""
import typing
def foo(**kwargs: typing.Any) -> int: return 1
def bar(*args: typing.Any) -> int: return 2
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
def foo(**kwargs) -> int: ...
def bar(*args) -> int: ...
""",
)
def test_static_method_from_pyi_as_callable(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
@staticmethod
def callback(msg: str) -> None: ...
""",
)
self.Check(
"""
from typing import Any, Callable
import foo
def func(c: Callable[[Any], None], arg: Any) -> None:
c(arg)
func(foo.A.callback, 'hello, world')
""",
pythonpath=[d.path],
)
def test_class_method_from_pyi_as_callable(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
@classmethod
def callback(cls, msg: str) -> None: ...
""",
)
self.Check(
"""
from typing import Any, Callable
import foo
def func(c: Callable[[Any], None], arg: Any) -> None:
c(arg)
func(foo.A.callback, 'hello, world')
""",
pythonpath=[d.path],
)
def test_ellipsis(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: Ellipsis) -> None: ...
""",
)
self.CheckWithErrors(
"""
import foo
x = foo.f(...)
y = foo.f(1) # wrong-arg-types
""",
pythonpath=[d.path],
)
def test_resolve_nested_type(self):
with test_utils.Tempdir() as d:
d.create_file(
"meta.pyi",
"""
class Meta(type): ...
""",
)
d.create_file(
"foo.pyi",
"""
import meta
class Foo:
class Bar(int, metaclass=meta.Meta): ...
CONST: Foo.Bar
""",
)
self.Check(
"""
import foo
print(foo.Foo.CONST)
""",
pythonpath=[d.path],
)
def test_partial_forward_reference(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
X1 = list['Y']
X2 = list['Z[str]']
X3 = int | 'Z[int]'
Y = int
T = TypeVar('T')
class Z(Generic[T]): ...
""",
)
self.Check(
"""
import foo
assert_type(foo.X1, "type[list[int]]")
assert_type(foo.X2, "type[list[foo.Z[str]]]")
assert_type(foo.X3, "type[Union[foo.Z[int], int]]")
""",
pythonpath=[d.path],
)
self.Check(
"""
from typing import Type, List, Union
import foo
assert_type(foo.X1, Type[List[int]])
assert_type(foo.X2, Type[List[foo.Z[str]]])
assert_type(foo.X3, Type[Union[foo.Z[int], int]])
""",
pythonpath=[d.path],
)
def test_bare_callable(self):
with self.DepTree([(
"foo.pyi",
"""
import types
def f(x) -> types.FunctionType: ...
""",
)]):
ty = self.Infer("""
import foo
def f(x):
return foo.f(x)
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Callable
def f(x) -> Callable[..., nothing]: ...
""",
)
def test_keyword_import(self):
with self.DepTree([(
"foo.pyi",
"""
import importlib
my_module = importlib.import_module('regretting.decisions.in.naming')
""",
)]):
self.Check("""
from foo import my_module
print(my_module.whatever)
""")
|
PYITest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/values_test.py
|
{
"start": 2633,
"end": 11386
}
|
class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueFromTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
single_value = constant_op.constant(1)
def value_fn(ctx):
del ctx
return single_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values),
constant_op.constant(1., shape=(distribution.num_replicas_in_sync)))
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueSingleNumpyArrayConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
array_value = np.array([1., 2., 3.])
def value_fn(ctx):
del ctx
return array_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values).numpy(),
[[1., 2., 3.]] * distribution.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueTupleConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
del ctx
return tuple_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([1.0, 1.0], [2.0, 2.0], [3.0, 3.0])
expected = tuple([v for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueNestedStructurePerReplica(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
per_replica = []
for val in tuple_value:
per_replica.append(val * ctx.replica_id_in_sync_group)
return tuple(per_replica)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([0.0, 1.0], [0.0, 2.0], [0.0, 3.0])
expected = tuple([v * i for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
# NOTE(priyag): Cannot test this with MultiWorkerMirroredStrategy because
# collective ops do not support SparseTensors.
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies_minus_default,
mode=["eager"]
))
def testMakeDistributedValueSpareTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
local_results = distribution.experimental_local_results(distributed_values)
for i in range(distribution.num_replicas_in_sync):
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(local_results[i]),
[[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueExtractFromArray(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
expected = range(distribution.num_replicas_in_sync)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueAndRun(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
@def_function.function
def run():
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
def computation(x):
return math_ops.square(x)
outputs = ds_test_util.gather(
distribution,
distribution.run(computation, args=(distributed_values,)))
return outputs
results = run()
expected = [i**2 for i in range(distribution.num_replicas_in_sync)]
self.assertAllEqual(results, expected)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"]))
def testMakeDistributedValueDefaultDevicePlacement(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return constant_op.constant(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
default_device = array_ops.identity(constant_op.constant(1.0)).device
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device, default_device)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"],
op_type=[constant_op.constant, array_ops.identity]))
def testMakeDistributedValueExplicitDevicePlacement(self, distribution,
op_type):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
worker_devices = distribution.extended.worker_devices
def value_fn(ctx):
# In multi client setup, worker_devices is just the devices on that
# worker.
worker_device_id = ctx.replica_id_in_sync_group % len(worker_devices)
with ops.device(worker_devices[worker_device_id]):
return op_type(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device,
worker_devices[i])
|
DistributedValuesTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_clustered01.py
|
{
"start": 315,
"end": 2199
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_clustered01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45886080, 45928832]
data = [
["Types", "Sub Type", "Value 1", "Value 2", "Value 3"],
["Type 1", "Sub Type A", 5000, 8000, 6000],
["", "Sub Type B", 2000, 3000, 4000],
["", "Sub Type C", 250, 1000, 2000],
["Type 2", "Sub Type D", 6000, 6000, 6500],
["", "Sub Type E", 500, 300, 200],
]
cat_data = [
["Type 1", None, None, "Type 2", None],
["Sub Type A", "Sub Type B", "Sub Type C", "Sub Type D", "Sub Type E"],
]
for row_num, row_data in enumerate(data):
worksheet.write_row(row_num, 0, row_data)
chart.add_series(
{
"name": "=Sheet1!$C$1",
"categories": "=Sheet1!$A$2:$B$6",
"values": "=Sheet1!$C$2:$C$6",
"categories_data": cat_data,
}
)
chart.add_series(
{
"name": "=Sheet1!$D$1",
"categories": "=Sheet1!$A$2:$B$6",
"values": "=Sheet1!$D$2:$D$6",
}
)
chart.add_series(
{
"name": "=Sheet1!$E$1",
"categories": "=Sheet1!$A$2:$B$6",
"values": "=Sheet1!$E$2:$E$6",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
pytorch__pytorch
|
torchgen/api/types/types_base.py
|
{
"start": 5731,
"end": 7137
}
|
class ____:
name: str
nctype: NamedCType
argument: Argument | TensorOptionsArguments | SelfArgument
# TODO: maybe don't represent default here
default: str | None = None
def rename(self, name: str) -> Binding:
return Binding(
name=name,
nctype=self.nctype,
argument=self.argument,
default=self.default,
)
@property
def type(self) -> str:
return self.nctype.cpp_type()
def no_default(self) -> Binding:
return Binding(
name=self.name,
nctype=self.nctype,
default=None,
argument=self.argument,
)
def decl(self, *, func_ptr_cast: bool = False) -> str:
mb_default = ""
if self.default is not None:
mb_default = f"={self.default}"
# casting only needs to know the type
if func_ptr_cast:
return f"{self.type}"
else:
return f"{self.type} {self.name}{mb_default}"
def defn(self) -> str:
return f"{self.type} {self.name}"
def with_name(self, name: str) -> Binding:
return Binding(
name=name, nctype=self.nctype, argument=self.argument, default=self.default
)
# An Expr is a C++ expression. It has a C++ string representing its syntax,
# as well as a CType saying what it provides.
@dataclass(frozen=True)
|
Binding
|
python
|
django__django
|
tests/apps/two_configs_one_default_app/apps.py
|
{
"start": 36,
"end": 131
}
|
class ____(AppConfig):
default = True
name = "apps.two_configs_one_default_app"
|
TwoConfig
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/test_tools.py
|
{
"start": 47298,
"end": 84475
}
|
class ____(BaseTool):
name: str = "foo"
description: str = "foo."
args_schema: type[BaseModel] = fooSchema
@override
def _run(self, x: int, y: str) -> Any:
return y
@tool("foo", args_schema=fooSchema)
def injected_tool_with_schema(x: int, y: str) -> str:
return y
@pytest.mark.parametrize("tool_", [InjectedTool()])
def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
"description": "Foo.\n\nArgs:\n x: abc\n y: 123",
"type": "object",
"properties": {
"x": {"title": "X", "type": "integer"},
"y": {"title": "Y", "type": "string"},
},
"required": ["x", "y"],
}
assert _schema(tool_.tool_call_schema) == {
"title": "foo",
"description": "foo.",
"type": "object",
"properties": {"x": {"title": "X", "type": "integer"}},
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
with pytest.raises(expected_error):
tool_.invoke({"x": 5})
assert convert_to_openai_function(tool_) == {
"name": "foo",
"description": "foo.",
"parameters": {
"type": "object",
"properties": {"x": {"type": "integer"}},
"required": ["x"],
},
}
@pytest.mark.parametrize(
"tool_",
[injected_tool_with_schema, InjectedToolWithSchema()],
)
def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
assert _schema(tool_.get_input_schema()) == {
"title": "fooSchema",
"description": "foo.",
"type": "object",
"properties": {
"x": {"description": "abc", "title": "X", "type": "integer"},
"y": {"description": "123", "title": "Y", "type": "string"},
},
"required": ["x", "y"],
}
assert _schema(tool_.tool_call_schema) == {
"title": "foo",
"description": "foo.",
"type": "object",
"properties": {"x": {"description": "abc", "title": "X", "type": "integer"}},
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
with pytest.raises(expected_error):
tool_.invoke({"x": 5})
assert convert_to_openai_function(tool_) == {
"name": "foo",
"description": "foo.",
"parameters": {
"type": "object",
"properties": {"x": {"type": "integer", "description": "abc"}},
"required": ["x"],
},
}
def test_tool_injected_arg() -> None:
tool_ = injected_tool
assert _schema(tool_.get_input_schema()) == {
"title": "foo",
"description": "Foo.",
"type": "object",
"properties": {
"x": {"description": "abc", "title": "X", "type": "integer"},
"y": {"description": "123", "title": "Y", "type": "string"},
},
"required": ["x", "y"],
}
assert _schema(tool_.tool_call_schema) == {
"title": "foo",
"description": "Foo.",
"type": "object",
"properties": {"x": {"description": "abc", "title": "X", "type": "integer"}},
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
with pytest.raises(expected_error):
tool_.invoke({"x": 5})
assert convert_to_openai_function(tool_) == {
"name": "foo",
"description": "Foo.",
"parameters": {
"type": "object",
"properties": {"x": {"type": "integer", "description": "abc"}},
"required": ["x"],
},
}
def test_tool_inherited_injected_arg() -> None:
class BarSchema(BaseModel):
"""bar."""
y: Annotated[str, "foobar comment", InjectedToolArg()] = Field(
..., description="123"
)
class FooSchema(BarSchema):
"""foo."""
x: int = Field(..., description="abc")
class InheritedInjectedArgTool(BaseTool):
name: str = "foo"
description: str = "foo."
args_schema: type[BaseModel] = FooSchema
@override
def _run(self, x: int, y: str) -> Any:
return y
tool_ = InheritedInjectedArgTool()
assert tool_.get_input_schema().model_json_schema() == {
"title": "FooSchema", # Matches the title from the provided schema
"description": "foo.",
"type": "object",
"properties": {
"x": {"description": "abc", "title": "X", "type": "integer"},
"y": {"description": "123", "title": "Y", "type": "string"},
},
"required": ["y", "x"],
}
# Should not include `y` since it's annotated as an injected tool arg
assert _get_tool_call_json_schema(tool_) == {
"title": "foo",
"description": "foo.",
"type": "object",
"properties": {"x": {"description": "abc", "title": "X", "type": "integer"}},
"required": ["x"],
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
)
with pytest.raises(expected_error):
tool_.invoke({"x": 5})
assert convert_to_openai_function(tool_) == {
"name": "foo",
"description": "foo.",
"parameters": {
"type": "object",
"properties": {"x": {"type": "integer", "description": "abc"}},
"required": ["x"],
},
}
def _get_parametrized_tools() -> list:
def my_tool(x: int, y: str, some_tool: Annotated[Any, InjectedToolArg]) -> str:
"""my_tool."""
return some_tool
async def my_async_tool(
x: int, y: str, *, some_tool: Annotated[Any, InjectedToolArg]
) -> str:
"""my_tool."""
return some_tool
return [my_tool, my_async_tool]
@pytest.mark.parametrize("tool_", _get_parametrized_tools())
def test_fn_injected_arg_with_schema(tool_: Callable) -> None:
assert convert_to_openai_function(tool_) == {
"name": tool_.__name__,
"description": "my_tool.",
"parameters": {
"type": "object",
"properties": {
"x": {"type": "integer"},
"y": {"type": "string"},
},
"required": ["x", "y"],
},
}
def generate_models() -> list[Any]:
"""Generate a list of base models depending on the pydantic version."""
class FooProper(BaseModel):
a: int
b: str
return [FooProper]
def generate_backwards_compatible_v1() -> list[Any]:
"""Generate a model with pydantic 2 from the v1 namespace."""
class FooV1Namespace(BaseModelV1):
a: int
b: str
return [FooV1Namespace]
# This generates a list of models that can be used for testing that our APIs
# behave well with either pydantic 1 proper,
# pydantic v1 from pydantic 2,
# or pydantic 2 proper.
TEST_MODELS = generate_models()
if sys.version_info < (3, 14):
TEST_MODELS += generate_backwards_compatible_v1()
@pytest.mark.parametrize("pydantic_model", TEST_MODELS)
def test_args_schema_as_pydantic(pydantic_model: Any) -> None:
class SomeTool(BaseTool):
args_schema: type[pydantic_model] = pydantic_model
@override
def _run(self, *args: Any, **kwargs: Any) -> str:
return "foo"
tool = SomeTool(
name="some_tool", description="some description", args_schema=pydantic_model
)
assert tool.args == {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
}
input_schema = tool.get_input_schema()
if issubclass(input_schema, BaseModel):
input_json_schema = input_schema.model_json_schema()
elif issubclass(input_schema, BaseModelV1):
input_json_schema = input_schema.schema()
else:
msg = "Unknown input schema type"
raise TypeError(msg)
assert input_json_schema == {
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": pydantic_model.__name__,
"type": "object",
}
tool_json_schema = _get_tool_call_json_schema(tool)
assert tool_json_schema == {
"description": "some description",
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": "some_tool",
"type": "object",
}
def test_args_schema_explicitly_typed() -> None:
"""This should test that one can type the args schema as a Pydantic model.
Please note that this will test using pydantic 2 even though `BaseTool`
is a Pydantic 1 model!
"""
class Foo(BaseModel):
a: int
b: str
class SomeTool(BaseTool):
# type ignoring here since we're allowing overriding a type
# signature of pydantic.v1.BaseModel with pydantic.BaseModel
# for pydantic 2!
args_schema: type[BaseModel] = Foo
@override
def _run(self, *args: Any, **kwargs: Any) -> str:
return "foo"
tool = SomeTool(name="some_tool", description="some description")
assert tool.get_input_schema().model_json_schema() == {
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": "Foo",
"type": "object",
}
assert _get_tool_call_json_schema(tool) == {
"description": "some description",
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": "some_tool",
"type": "object",
}
@pytest.mark.parametrize("pydantic_model", TEST_MODELS)
def test_structured_tool_with_different_pydantic_versions(pydantic_model: Any) -> None:
"""This should test that one can type the args schema as a Pydantic model."""
def foo(a: int, b: str) -> str:
"""Hahaha."""
return "foo"
foo_tool = StructuredTool.from_function(
func=foo,
args_schema=pydantic_model,
)
assert foo_tool.invoke({"a": 5, "b": "hello"}) == "foo"
args_schema = cast("type[BaseModel]", foo_tool.args_schema)
if issubclass(args_schema, BaseModel):
args_json_schema = args_schema.model_json_schema()
elif issubclass(args_schema, BaseModelV1):
args_json_schema = args_schema.schema()
else:
msg = "Unknown input schema type"
raise TypeError(msg)
assert args_json_schema == {
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": pydantic_model.__name__,
"type": "object",
}
input_schema = foo_tool.get_input_schema()
if issubclass(input_schema, BaseModel):
input_json_schema = input_schema.model_json_schema()
elif issubclass(input_schema, BaseModelV1):
input_json_schema = input_schema.schema()
else:
msg = "Unknown input schema type"
raise TypeError(msg)
assert input_json_schema == {
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": pydantic_model.__name__,
"type": "object",
}
valid_tool_result_blocks = [
"foo",
{"type": "text", "text": "foo"},
{"type": "text", "blah": "foo"}, # note, only 'type' key is currently checked
{"type": "image_url", "image_url": {}}, # openai format
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "123",
},
}, # anthropic format
{"type": "json", "json": {}}, # bedrock format
]
invalid_tool_result_blocks = [
{"text": "foo"}, # missing type
{"results": "foo"}, # not content blocks
]
@pytest.mark.parametrize(
("obj", "expected"),
[
*([[block, True] for block in valid_tool_result_blocks]),
*([[block, False] for block in invalid_tool_result_blocks]),
],
)
def test__is_message_content_block(obj: Any, *, expected: bool) -> None:
assert _is_message_content_block(obj) is expected
@pytest.mark.parametrize(
("obj", "expected"),
[
("foo", True),
(valid_tool_result_blocks, True),
(invalid_tool_result_blocks, False),
],
)
def test__is_message_content_type(obj: Any, *, expected: bool) -> None:
assert _is_message_content_type(obj) is expected
@pytest.mark.parametrize("use_v1_namespace", [True, False])
def test__get_all_basemodel_annotations_v2(*, use_v1_namespace: bool) -> None:
A = TypeVar("A")
if use_v1_namespace:
if sys.version_info >= (3, 14):
pytest.skip("pydantic.v1 namespace not supported with Python 3.14+")
class ModelA(BaseModelV1, Generic[A], extra="allow"):
a: A
else:
class ModelA(BaseModel, Generic[A]): # type: ignore[no-redef]
a: A
model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
class ModelB(ModelA[str]):
b: Annotated[ModelA[dict[str, Any]], "foo"]
class Mixin:
def foo(self) -> str:
return "foo"
class ModelC(Mixin, ModelB):
c: dict
expected = {"a": str, "b": Annotated[ModelA[dict[str, Any]], "foo"], "c": dict}
actual = get_all_basemodel_annotations(ModelC)
assert actual == expected
expected = {"a": str, "b": Annotated[ModelA[dict[str, Any]], "foo"]}
actual = get_all_basemodel_annotations(ModelB)
assert actual == expected
expected = {"a": Any}
actual = get_all_basemodel_annotations(ModelA)
assert actual == expected
expected = {"a": int}
actual = get_all_basemodel_annotations(ModelA[int])
assert actual == expected
D = TypeVar("D", bound=str | int)
class ModelD(ModelC, Generic[D]):
d: D | None
expected = {
"a": str,
"b": Annotated[ModelA[dict[str, Any]], "foo"],
"c": dict,
"d": str | int | None,
}
actual = get_all_basemodel_annotations(ModelD)
assert actual == expected
expected = {
"a": str,
"b": Annotated[ModelA[dict[str, Any]], "foo"],
"c": dict,
"d": int | None,
}
actual = get_all_basemodel_annotations(ModelD[int])
assert actual == expected
def test_get_all_basemodel_annotations_aliases() -> None:
class CalculatorInput(BaseModel):
a: int = Field(description="first number", alias="A")
b: int = Field(description="second number")
actual = get_all_basemodel_annotations(CalculatorInput)
assert actual == {"a": int, "b": int}
def test_tool_annotations_preserved() -> None:
"""Test that annotations are preserved when creating a tool."""
@tool
def my_tool(val: int, other_val: Annotated[dict, "my annotation"]) -> str:
"""Tool docstring."""
return "foo"
schema = my_tool.get_input_schema()
func = my_tool.func # type: ignore[attr-defined]
expected_type_hints = {
name: hint
for name, hint in func.__annotations__.items()
if name in inspect.signature(func).parameters
}
assert schema.__annotations__ == expected_type_hints
def test_create_retriever_tool() -> None:
class MyRetriever(BaseRetriever):
@override
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
return [Document(page_content=f"foo {query}"), Document(page_content="bar")]
retriever = MyRetriever()
retriever_tool = tools.create_retriever_tool(
retriever, "retriever_tool_content", "Retriever Tool Content"
)
assert isinstance(retriever_tool, BaseTool)
assert retriever_tool.name == "retriever_tool_content"
assert retriever_tool.description == "Retriever Tool Content"
assert retriever_tool.invoke("bar") == "foo bar\n\nbar"
assert retriever_tool.invoke(
ToolCall(
name="retriever_tool_content",
args={"query": "bar"},
id="123",
type="tool_call",
)
) == ToolMessage(
"foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content"
)
retriever_tool_artifact = tools.create_retriever_tool(
retriever,
"retriever_tool_artifact",
"Retriever Tool Artifact",
response_format="content_and_artifact",
)
assert isinstance(retriever_tool_artifact, BaseTool)
assert retriever_tool_artifact.name == "retriever_tool_artifact"
assert retriever_tool_artifact.description == "Retriever Tool Artifact"
assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar"
assert retriever_tool_artifact.invoke(
ToolCall(
name="retriever_tool_artifact",
args={"query": "bar"},
id="123",
type="tool_call",
)
) == ToolMessage(
"foo bar\n\nbar",
artifact=[Document(page_content="foo bar"), Document(page_content="bar")],
tool_call_id="123",
name="retriever_tool_artifact",
)
def test_tool_args_schema_pydantic_v2_with_metadata() -> None:
class Foo(BaseModel):
x: list[int] = Field(
description="List of integers", min_length=10, max_length=15
)
@tool(args_schema=Foo)
def foo(x) -> list[int]: # type: ignore[no-untyped-def] # noqa: ANN001
"""Foo."""
return x
assert _get_tool_call_json_schema(foo) == {
"description": "Foo.",
"properties": {
"x": {
"description": "List of integers",
"items": {"type": "integer"},
"maxItems": 15,
"minItems": 10,
"title": "X",
"type": "array",
}
},
"required": ["x"],
"title": "foo",
"type": "object",
}
assert foo.invoke({"x": [0] * 10})
with pytest.raises(ValidationError):
foo.invoke({"x": [0] * 9})
def test_imports() -> None:
    """Every advertised public name of the tools module exists and is non-None."""
    public_api = (
        "FILTERED_ARGS",
        "SchemaAnnotationError",
        "create_schema_from_function",
        "ToolException",
        "BaseTool",
        "Tool",
        "StructuredTool",
        "tool",
        "RetrieverInput",
        "create_retriever_tool",
        "ToolsRenderer",
        "render_text_description",
        "render_text_description_and_args",
        "BaseToolkit",
        "convert_runnable_to_tool",
        "InjectedToolArg",
    )
    for name in public_api:
        assert hasattr(tools, name)
        assert getattr(tools, name) is not None
def test_structured_tool_direct_init() -> None:
    """Invoking a coroutine-only StructuredTool synchronously raises.

    A StructuredTool constructed directly with only ``coroutine`` (no ``func``)
    has no synchronous implementation, so ``invoke`` must raise
    ``NotImplementedError``.
    """
    # Removed an unused local `def foo` that was never passed to the tool.

    async def async_foo(bar: str) -> str:
        return bar

    class FooSchema(BaseModel):
        bar: str = Field(..., description="The bar")

    # Only a coroutine is supplied — the sync code path is unimplemented.
    tool = StructuredTool(name="foo", args_schema=FooSchema, coroutine=async_foo)
    with pytest.raises(NotImplementedError):
        assert tool.invoke("hello") == "hello"
def test_injected_arg_with_complex_type() -> None:
    """Test that an injected tool arg can be a complex type."""

    class Foo:
        # Simple payload attribute the tool reads back.
        value = "bar"

    @tool
    def injected_tool(x: int, foo: Annotated[Foo, InjectedToolArg]) -> str:
        """Tool that has an injected tool arg."""
        return foo.value

    assert injected_tool.invoke({"x": 5, "foo": Foo()}) == "bar"
@pytest.mark.parametrize("schema_format", ["model", "json_schema"])
def test_tool_allows_extra_runtime_args_with_custom_schema(
    schema_format: Literal["model", "json_schema"],
) -> None:
    """Ensure runtime args are preserved even if not in the args schema."""

    class InputSchema(BaseModel):
        query: str

    captured: dict[str, Any] = {}

    # _DirectlyInjectedToolArg appears to mark values handed straight to the
    # tool function without schema validation — NOTE(review): private API.
    @dataclass
    class MyRuntime(_DirectlyInjectedToolArg):
        some_obj: object

    # Exercise both a BaseModel schema and its plain JSON-schema dict form.
    args_schema = (
        InputSchema if schema_format == "model" else InputSchema.model_json_schema()
    )

    @tool(args_schema=args_schema)
    def runtime_tool(query: str, runtime: MyRuntime) -> str:
        """Echo the query and capture runtime value."""
        captured["runtime"] = runtime
        return query

    runtime_obj = object()
    runtime = MyRuntime(some_obj=runtime_obj)
    # "runtime" is not declared in the schema but must reach the tool intact,
    # as the very same object (identity check below).
    assert runtime_tool.invoke({"query": "hello", "runtime": runtime}) == "hello"
    assert captured["runtime"] is runtime
def test_tool_injected_tool_call_id_with_custom_schema() -> None:
    """Ensure InjectedToolCallId works with custom args schema."""

    class InputSchema(BaseModel):
        x: int

    @tool(args_schema=InputSchema)
    def injected_tool(
        x: int, tool_call_id: Annotated[str, InjectedToolCallId]
    ) -> ToolMessage:
        """Tool with injected tool_call_id and custom schema."""
        return ToolMessage(str(x), tool_call_id=tool_call_id)

    # Test that tool_call_id is properly injected even though not in custom schema
    result = injected_tool.invoke(
        {
            "type": "tool_call",
            "args": {"x": 42},
            "name": "injected_tool",
            "id": "test_call_id",
        }
    )
    assert result == ToolMessage("42", tool_call_id="test_call_id")

    # Test that it still raises error when invoked without a ToolCall
    # (bare args carry no "id" field, so there is nothing to inject).
    with pytest.raises(
        ValueError,
        match="When tool includes an InjectedToolCallId argument, "
        "tool must always be invoked with a full model ToolCall",
    ):
        injected_tool.invoke({"x": 42})
def test_tool_injected_arg_with_custom_schema() -> None:
    """Ensure InjectedToolArg works with custom args schema."""

    class InputSchema(BaseModel):
        query: str

    class CustomContext:
        """Custom context object to be injected."""

        def __init__(self, value: str) -> None:
            self.value = value

    captured: dict[str, Any] = {}

    # "context" is absent from InputSchema; InjectedToolArg marks it as a
    # runtime-only argument supplied by the caller rather than the model.
    @tool(args_schema=InputSchema)
    def search_tool(
        query: str, context: Annotated[CustomContext, InjectedToolArg]
    ) -> str:
        """Search with custom context."""
        captured["context"] = context
        return f"Results for {query} with context {context.value}"

    # Test that context is properly injected even though not in custom schema
    ctx = CustomContext("test_context")
    result = search_tool.invoke({"query": "hello", "context": ctx})
    assert result == "Results for hello with context test_context"
    # The very same object must arrive at the tool (no copying/validation).
    assert captured["context"] is ctx
    assert captured["context"].value == "test_context"
def test_tool_injected_tool_call_id() -> None:
    """InjectedToolCallId is filled from the ToolCall ``id`` and requires one."""

    @tool
    def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage:
        """Foo."""
        return ToolMessage(x, tool_call_id=tool_call_id)  # type: ignore[call-overload]

    # The ToolCall's "id" is injected as tool_call_id.
    assert foo.invoke(
        {
            "type": "tool_call",
            "args": {"x": 0},
            "name": "foo",
            "id": "bar",
        }
    ) == ToolMessage(0, tool_call_id="bar")  # type: ignore[call-overload]

    # Bare args (no ToolCall envelope) carry no id to inject, so this raises.
    with pytest.raises(
        ValueError,
        match="When tool includes an InjectedToolCallId argument, "
        "tool must always be invoked with a full model ToolCall",
    ):
        assert foo.invoke({"x": 0})

    # The annotation also works when instantiated: InjectedToolCallId().
    @tool
    def foo2(x: int, tool_call_id: Annotated[str, InjectedToolCallId()]) -> ToolMessage:
        """Foo."""
        return ToolMessage(x, tool_call_id=tool_call_id)  # type: ignore[call-overload]

    assert foo2.invoke(
        {
            "type": "tool_call",
            "args": {"x": 0},
            "name": "foo",
            "id": "bar",
        }
    ) == ToolMessage(0, tool_call_id="bar")  # type: ignore[call-overload]
def test_tool_injected_tool_call_id_override_llm_generated() -> None:
    """Test that InjectedToolCallId overrides LLM-generated values."""

    @tool
    def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage:
        """Foo."""
        return ToolMessage(str(x), tool_call_id=tool_call_id)

    # Test that when LLM generates the tool_call_id, it gets overridden:
    # a hallucinated tool_call_id inside args must not win over the
    # authoritative ToolCall "id".
    result = foo.invoke(
        {
            "type": "tool_call",
            "args": {"x": 0, "tool_call_id": "fake_llm_id"},  # LLM generated this
            "name": "foo",
            "id": "real_tool_call_id",  # This should be used instead
        }
    )
    # The tool should receive the real tool call ID, not the LLM-generated one
    assert result == ToolMessage("0", tool_call_id="real_tool_call_id")
def test_tool_uninjected_tool_call_id() -> None:
    """Without InjectedToolCallId, tool_call_id is an ordinary required arg."""

    @tool
    def foo(x: int, tool_call_id: str) -> ToolMessage:
        """Foo."""
        return ToolMessage(str(x), tool_call_id=tool_call_id)

    # Not injected, so omitting it from args is a validation error.
    with pytest.raises(ValueError, match="1 validation error for foo"):
        foo.invoke({"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"})

    # Supplying it explicitly inside args works; the ToolCall "id" ("bar")
    # is not used for this parameter.
    assert foo.invoke(
        {
            "type": "tool_call",
            "args": {"x": 0, "tool_call_id": "zap"},
            "name": "foo",
            "id": "bar",
        }
    ) == ToolMessage(0, tool_call_id="zap")  # type: ignore[call-overload]
def test_tool_return_output_mixin() -> None:
    """A ToolOutputMixin return value is passed through unchanged by invoke."""

    class Bar(ToolOutputMixin):
        def __init__(self, x: int) -> None:
            self.x = x

        def __eq__(self, other: object) -> bool:
            return isinstance(other, Bar) and other.x == self.x

        def __hash__(self) -> int:
            return hash(self.x)

    @tool
    def foo(x: int) -> Bar:
        """Foo."""
        return Bar(x=x)

    call = {
        "type": "tool_call",
        "args": {"x": 0},
        "name": "foo",
        "id": "bar",
    }
    # The mixin suppresses wrapping into a ToolMessage.
    assert foo.invoke(call) == Bar(x=0)
def test_tool_mutate_input() -> None:
    """Invoking a BaseTool must leave the caller's input dict untouched."""

    class MyTool(BaseTool):
        name: str = "MyTool"
        description: str = "a tool"

        @override
        def _run(
            self,
            x: str,
            run_manager: CallbackManagerForToolRun | None = None,
        ) -> str:
            return "hi"

    payload = {"x": "hi"}
    MyTool().invoke(payload)
    # The tool must operate on a copy, never mutate the original mapping.
    assert payload == {"x": "hi"}
def test_structured_tool_args_schema_dict() -> None:
    """A plain JSON-schema dict is accepted as args_schema and round-trips."""
    args_schema = {
        "properties": {
            "a": {"title": "A", "type": "integer"},
            "b": {"title": "B", "type": "integer"},
        },
        "required": ["a", "b"],
        "title": "add",
        "type": "object",
        "description": "add two numbers",
    }
    tool = StructuredTool(
        name="add",
        args_schema=args_schema,
        func=lambda a, b: a + b,
    )
    assert tool.invoke({"a": 1, "b": 2}) == 3
    # The dict is stored as-is, not converted to a pydantic model.
    assert tool.args_schema == args_schema
    # test that the tool call schema is the same as the args schema
    assert _get_tool_call_json_schema(tool) == args_schema
    # test that the input schema is the same as the parent (Runnable) input schema
    assert (
        tool.get_input_schema().model_json_schema()
        == create_model_v2(
            tool.get_name("Input"),
            root=tool.InputType,
            module_name=tool.__class__.__module__,
        ).model_json_schema()
    )
    # test that args are extracted correctly
    assert tool.args == {
        "a": {"title": "A", "type": "integer"},
        "b": {"title": "B", "type": "integer"},
    }
def test_simple_tool_args_schema_dict() -> None:
    """A plain JSON-schema dict works as args_schema on a simple Tool too."""
    args_schema = {
        "properties": {
            "a": {"title": "A", "type": "integer"},
        },
        "required": ["a"],
        "title": "square",
        "type": "object",
        "description": "square a number",
    }
    tool = Tool(
        name="square",
        description="square a number",
        args_schema=args_schema,
        func=lambda a: a * a,
    )
    assert tool.invoke({"a": 2}) == 4
    # The dict is stored as-is, not converted to a pydantic model.
    assert tool.args_schema == args_schema
    # test that the tool call schema is the same as the args schema
    assert _get_tool_call_json_schema(tool) == args_schema
    # test that the input schema is the same as the parent (Runnable) input schema
    assert (
        tool.get_input_schema().model_json_schema()
        == create_model_v2(
            tool.get_name("Input"),
            root=tool.InputType,
            module_name=tool.__class__.__module__,
        ).model_json_schema()
    )
    # test that args are extracted correctly
    assert tool.args == {
        "a": {"title": "A", "type": "integer"},
    }
def test_empty_string_tool_call_id() -> None:
    """An empty-string tool call id is preserved on the resulting ToolMessage."""

    @tool
    def foo(x: int) -> str:
        """Foo."""
        return "hi"

    call = {"type": "tool_call", "args": {"x": 0}, "id": ""}
    result = foo.invoke(call)
    assert result == ToolMessage(content="hi", name="foo", tool_call_id="")
def test_tool_decorator_description() -> None:
    """Description precedence: explicit ``description=`` > schema docstring > fn docstring."""
    # test basic tool
    @tool
    def foo(x: int) -> str:
        """Foo."""
        return "hi"

    assert foo.description == "Foo."
    assert (
        cast("BaseModel", foo.tool_call_schema).model_json_schema()["description"]
        == "Foo."
    )

    # test basic tool with description
    @tool(description="description")
    def foo_description(x: int) -> str:
        """Foo."""
        return "hi"

    assert foo_description.description == "description"
    assert (
        cast("BaseModel", foo_description.tool_call_schema).model_json_schema()[
            "description"
        ]
        == "description"
    )

    # test tool with args schema
    class ArgsSchema(BaseModel):
        """Bar."""

        x: int

    # The schema docstring wins over the (absent) function docstring.
    @tool(args_schema=ArgsSchema)
    def foo_args_schema(x: int) -> str:
        return "hi"

    assert foo_args_schema.description == "Bar."
    assert (
        cast("BaseModel", foo_args_schema.tool_call_schema).model_json_schema()[
            "description"
        ]
        == "Bar."
    )

    # An explicit description overrides the schema docstring.
    @tool(description="description", args_schema=ArgsSchema)
    def foo_args_schema_description(x: int) -> str:
        return "hi"

    assert foo_args_schema_description.description == "description"
    assert (
        cast(
            "BaseModel", foo_args_schema_description.tool_call_schema
        ).model_json_schema()["description"]
        == "description"
    )

    # JSON-schema dicts follow the same precedence rules as BaseModel schemas.
    args_json_schema = {
        "description": "JSON Schema.",
        "properties": {
            "x": {"description": "my field", "title": "X", "type": "string"}
        },
        "required": ["x"],
        "title": "my_tool",
        "type": "object",
    }

    @tool(args_schema=args_json_schema)
    def foo_args_jsons_schema(x: int) -> str:
        return "hi"

    @tool(description="description", args_schema=args_json_schema)
    def foo_args_jsons_schema_with_description(x: int) -> str:
        return "hi"

    assert foo_args_jsons_schema.description == "JSON Schema."
    assert (
        cast("dict", foo_args_jsons_schema.tool_call_schema)["description"]
        == "JSON Schema."
    )

    assert foo_args_jsons_schema_with_description.description == "description"
    assert (
        cast("dict", foo_args_jsons_schema_with_description.tool_call_schema)[
            "description"
        ]
        == "description"
    )
def test_title_property_preserved() -> None:
    """Test that the title property is preserved when generating schema.

    https://github.com/langchain-ai/langchain/issues/30456
    """
    schema_to_be_extracted = {
        "type": "object",
        "required": [],
        "properties": {
            # A *property named* "title" must not be confused with the
            # JSON-Schema-level "title" keyword and dropped during conversion.
            "title": {"type": "string", "description": "item title"},
            "due_date": {"type": "string", "description": "item due date"},
        },
        "description": "foo",
    }

    @tool(args_schema=schema_to_be_extracted)
    def extract_data(extracted_data: dict[str, Any]) -> dict[str, Any]:
        """Some documentation."""
        return extracted_data

    # Both properties survive, including the one named "title".
    assert convert_to_openai_tool(extract_data) == {
        "function": {
            "description": "Some documentation.",
            "name": "extract_data",
            "parameters": {
                "properties": {
                    "due_date": {"description": "item due date", "type": "string"},
                    "title": {"description": "item title", "type": "string"},
                },
                "required": [],
                "type": "object",
            },
        },
        "type": "function",
    }
def test_nested_pydantic_fields() -> None:
    """A nested pydantic model field converts without dropping sibling properties."""

    class Address(BaseModel):
        street: str

    class Person(BaseModel):
        name: str
        address: Address = Field(description="Home address")

    converted = convert_to_openai_tool(Person)
    properties = converted["function"]["parameters"]["properties"]
    assert len(properties) == 2
async def test_tool_ainvoke_does_not_mutate_inputs() -> None:
    """Verify that the inputs are not mutated when invoking a tool asynchronously."""

    def sync_no_op(foo: int) -> str:
        return "good"

    async def async_no_op(foo: int) -> str:
        return "good"

    tool = StructuredTool(
        name="sample_tool",
        description="",
        args_schema={
            "type": "object",
            "required": ["foo"],
            "properties": {
                # Fixed fixture: the property key previously said "seconds"
                # (a stale copy-paste) and did not match the "foo" parameter
                # or the "required" list above.
                "foo": {"type": "number", "description": "How big is foo"}
            },
        },
        coroutine=async_no_op,
        func=sync_no_op,
    )
    tool_call: ToolCall = {
        "name": "sample_tool",
        "args": {"foo": 2},
        "id": "call_0_82c17db8-95df-452f-a4c2-03f809022134",
        "type": "tool_call",
    }
    # Sync path: the args dict must be left untouched.
    assert tool.invoke(tool_call["args"]) == "good"
    assert tool_call == {
        "name": "sample_tool",
        "args": {"foo": 2},
        "id": "call_0_82c17db8-95df-452f-a4c2-03f809022134",
        "type": "tool_call",
    }

    # Async path: same guarantee.
    assert await tool.ainvoke(tool_call["args"]) == "good"
    assert tool_call == {
        "name": "sample_tool",
        "args": {"foo": 2},
        "id": "call_0_82c17db8-95df-452f-a4c2-03f809022134",
        "type": "tool_call",
    }
def test_tool_invoke_does_not_mutate_inputs() -> None:
    """Verify that the inputs are not mutated when invoking a tool synchronously."""

    def sync_no_op(foo: int) -> str:
        return "good"

    async def async_no_op(foo: int) -> str:
        return "good"

    tool = StructuredTool(
        name="sample_tool",
        description="",
        args_schema={
            "type": "object",
            "required": ["foo"],
            "properties": {
                # Fixed fixture: the property key previously said "seconds"
                # (a stale copy-paste) and did not match the "foo" parameter
                # or the "required" list above.
                "foo": {"type": "number", "description": "How big is foo"}
            },
        },
        coroutine=async_no_op,
        func=sync_no_op,
    )
    tool_call: ToolCall = {
        "name": "sample_tool",
        "args": {"foo": 2},
        "id": "call_0_82c17db8-95df-452f-a4c2-03f809022134",
        "type": "tool_call",
    }
    # After invocation, the original ToolCall (including its args dict)
    # must be byte-for-byte what the caller supplied.
    assert tool.invoke(tool_call["args"]) == "good"
    assert tool_call == {
        "name": "sample_tool",
        "args": {"foo": 2},
        "id": "call_0_82c17db8-95df-452f-a4c2-03f809022134",
        "type": "tool_call",
    }
def test_tool_args_schema_with_annotated_type() -> None:
    """A bare string in Annotated metadata becomes the argument's description."""

    @tool
    def test_tool(
        query_fragments: Annotated[
            list[str],
            "A list of query fragments",
        ],
    ) -> list[str]:
        """Search the Internet and retrieve relevant result items."""
        return []

    # The Annotated string shows up as "description" in the derived args schema.
    assert test_tool.args == {
        "query_fragments": {
            "description": "A list of query fragments",
            "items": {"type": "string"},
            "title": "Query Fragments",
            "type": "array",
        }
    }
|
InjectedToolWithSchema
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT001.py
|
{
"start": 216,
"end": 302
}
|
# Lint fixture: subclasses typing.Tuple but declares __slots__, so the
# flake8-slots rule (SLOT001) should not flag it — hence the "OK" marker.
class ____(Tuple[str, int, float]):  # OK
    __slots__ = ("foo",)
import builtins
|
Good
|
python
|
doocs__leetcode
|
solution/0000-0099/0090.Subsets II/Solution2.py
|
{
"start": 0,
"end": 537
}
|
class ____:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return all distinct subsets of nums (which may contain duplicates).

        Enumerates every bitmask over the sorted input and discards masks
        that select a duplicate value while skipping its immediately
        preceding equal twin — that canonical choice yields each distinct
        subset exactly once.
        """
        nums.sort()
        size = len(nums)
        result = []
        for mask in range(1 << size):
            subset = []
            for pos in range(size):
                if not (mask >> pos) & 1:
                    continue
                # Duplicate chosen while its earlier twin is unchosen:
                # this mask is a non-canonical repeat — drop it.
                if pos and nums[pos] == nums[pos - 1] and not (mask >> (pos - 1)) & 1:
                    break
                subset.append(nums[pos])
            else:  # no break: the mask is canonical
                result.append(subset)
        return result
|
Solution
|
python
|
openai__openai-python
|
src/openai/resources/containers/files/files.py
|
{
"start": 18476,
"end": 19111
}
|
class ____:
    """Wrap each ``Files`` API method so calls return the raw HTTP response."""

    def __init__(self, files: Files) -> None:
        # Kept for the lazily-built sub-resource wrapper below.
        self._files = files

        self.create = _legacy_response.to_raw_response_wrapper(
            files.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            files.retrieve,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            files.list,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            files.delete,
        )

    @cached_property
    def content(self) -> ContentWithRawResponse:
        # Built lazily and cached so the wrapper is only created on first use.
        return ContentWithRawResponse(self._files.content)
|
FilesWithRawResponse
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_function_base.py
|
{
"start": 66218,
"end": 66749
}
|
class ____(TestCase):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
@xpassIfTorchDynamo_np # (reason="unique not implemented for 'ComplexDouble'")
def test_simple_complex(self):
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
@xpassIfTorchDynamo_np # (reason="TODO: implement")
|
TestUnique
|
python
|
pytorch__pytorch
|
torch/serialization.py
|
{
"start": 5953,
"end": 9455
}
|
class ____:
    """
    Context manager or function to set default mmap options for :func:`torch.load` with ``mmap=True`` to flags.

    For now, only either ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED`` are supported.
    Please open an issue if you need any other option to be added here.

    .. note::
        This feature is currently not supported for Windows.

    Args:
        flags: ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED``
    """

    def __init__(self, flags: int) -> None:
        if IS_WINDOWS:
            raise RuntimeError(
                "Changing the default mmap options is currently not supported for Windows"
            )
        if flags != MAP_PRIVATE and flags != MAP_SHARED:
            raise ValueError(
                "Invalid argument in function set_default_mmap_options, "
                f"expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}"
            )
        # global config
        # Imported lazily; presumably to avoid an import cycle with
        # torch.utils.serialization — TODO confirm.
        from torch.utils.serialization import config

        # The new flag takes effect here, in __init__ (not in __enter__),
        # so this object also works as a plain function call; __exit__
        # restores the previous value when used as a context manager.
        self.prev = config.load.mmap_flags
        config.load.mmap_flags = flags

    def __enter__(self) -> None:
        # Nothing to do: __init__ already applied the new flags.
        pass

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        from torch.utils.serialization import config

        # Restore the flag captured in __init__, even if the body raised.
        config.load.mmap_flags = self.prev
def clear_safe_globals() -> None:
    """
    Clears the list of globals that are safe for ``weights_only`` load.
    """
    # Thin wrapper over the weights-only unpickler's module-level registry.
    _weights_only_unpickler._clear_safe_globals()
def get_safe_globals() -> list[Union[Callable, tuple[Callable, str]]]:
    """
    Returns the list of user-added globals that are safe for ``weights_only`` load.
    """
    # Thin wrapper over the weights-only unpickler's module-level registry.
    return _weights_only_unpickler._get_safe_globals()
def add_safe_globals(safe_globals: list[Union[Callable, tuple[Callable, str]]]) -> None:
    """
    Marks the given globals as safe for ``weights_only`` load. For example, functions
    added to this list can be called during unpickling, classes could be instantiated
    and have state set.

    Each item in the list can either be a function/class or a tuple of the form
    (function/class, string) where string is the full path of the function/class.

    Within the serialized format, each function is identified with its full
    path as ``{__module__}.{__qualname__}``. When calling this API, you can provide this
    full path that should match the one in the checkpoint otherwise the default
    ``{fn.__module__}.{fn.__qualname__}`` will be used.

    Args:
        safe_globals (List[Union[Callable, Tuple[Callable, str]]]): list of globals to mark as safe

    Example:
        >>> # xdoctest: +SKIP("Can't torch.save(t, ...) as doctest thinks MyTensor is defined on torch.serialization")
        >>> import tempfile
        >>> class MyTensor(torch.Tensor):
        ...     pass
        >>> t = MyTensor(torch.randn(2, 3))
        >>> with tempfile.NamedTemporaryFile() as f:
        ...     torch.save(t, f.name)
        # Running `torch.load(f.name, weights_only=True)` will fail with
        # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default.
        # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint.
        ...     torch.serialization.add_safe_globals([MyTensor])
        ...     torch.load(f.name, weights_only=True)
        # MyTensor([[-0.5024, -1.8152, -0.5455],
        # [-0.8234, 2.0500, -0.3657]])
    """
    # Delegates to the weights-only unpickler's module-level allowlist.
    _weights_only_unpickler._add_safe_globals(safe_globals)
|
set_default_mmap_options
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_iso_languages.py
|
{
"start": 871,
"end": 1873
}
|
class ____(ColumnMapMetricProvider):
    # Id string used to reference this metric from expectations.
    condition_metric_name = "column_values.valid_iso_languages"

    # Core logic for the PandasExecutionEngine: element-wise ISO-language check.
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Pass the validator directly — no lambda wrapper needed for a
        # single-argument callable.
        return column.apply(is_valid_iso_languages)

    # Business logic for the SqlAlchemyExecutionEngine (not implemented).
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # Business logic for the SparkDFExecutionEngine (not implemented).
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeIsoLanguages
|
python
|
bokeh__bokeh
|
src/bokeh/events.py
|
{
"start": 14657,
"end": 15039
}
|
class ____(PointEvent):
    ''' Announce a tap or click event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    # Event name string used to identify this DOM event type.
    event_name = 'tap'
|
Tap
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_rest_framework.py
|
{
"start": 1471,
"end": 1617
}
|
class ____(OAuth2View):
    # Requires an authenticated user AND a token carrying every scope listed
    # in required_scopes (enforced by TokenHasScope).
    permission_classes = [permissions.IsAuthenticated, TokenHasScope]
    required_scopes = ["scope1", "another"]
|
ScopedView
|
python
|
kamyu104__LeetCode-Solutions
|
Python/transpose-matrix.py
|
{
"start": 34,
"end": 406
}
|
class ____(object):
    def transpose(self, A):
        """
        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        # zip(*A) pairs up the i-th element of every row — i.e. the columns
        # of A — which is exactly the transpose. This also fixes the original
        # use of `xrange`, which does not exist on Python 3; zip works on
        # both Python 2 and 3. Still O(r * c) time.
        return [list(col) for col in zip(*A)]
# Time: O(r * c)
# Space: O(1)
|
Solution
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/flowchart/Flowchart.py
|
{
"start": 691,
"end": 21603
}
|
class ____(Node):
sigFileLoaded = QtCore.Signal(object)
sigFileSaved = QtCore.Signal(object)
#sigOutputChanged = QtCore.Signal() ## inherited from Node
sigChartLoaded = QtCore.Signal()
sigStateChanged = QtCore.Signal() # called when output is expected to have changed
sigChartChanged = QtCore.Signal(object, object, object) # called when nodes are added, removed, or renamed.
# (self, action, node)
def __init__(self, terminals=None, name=None, filePath=None, library=None):
self.library = library or LIBRARY
if name is None:
name = "Flowchart"
if terminals is None:
terminals = {}
self.filePath = filePath
Node.__init__(self, name, allowAddInput=True, allowAddOutput=True) ## create node without terminals; we'll add these later
self.inputWasSet = False ## flag allows detection of changes in the absence of input change.
self._nodes = {}
self.nextZVal = 10
#self.connects = []
#self._chartGraphicsItem = FlowchartGraphicsItem(self)
self._widget = None
self._scene = None
self.processing = False ## flag that prevents recursive node updates
self.widget()
self.inputNode = Node('Input', allowRemove=False, allowAddOutput=True)
self.outputNode = Node('Output', allowRemove=False, allowAddInput=True)
self.addNode(self.inputNode, 'Input', [-150, 0])
self.addNode(self.outputNode, 'Output', [300, 0])
self.outputNode.sigOutputChanged.connect(self.outputChanged)
self.outputNode.sigTerminalRenamed.connect(self.internalTerminalRenamed)
self.inputNode.sigTerminalRenamed.connect(self.internalTerminalRenamed)
self.outputNode.sigTerminalRemoved.connect(self.internalTerminalRemoved)
self.inputNode.sigTerminalRemoved.connect(self.internalTerminalRemoved)
self.outputNode.sigTerminalAdded.connect(self.internalTerminalAdded)
self.inputNode.sigTerminalAdded.connect(self.internalTerminalAdded)
self.viewBox.autoRange(padding = 0.04)
for name, opts in terminals.items():
self.addTerminal(name, **opts)
def setLibrary(self, lib):
self.library = lib
self.widget().chartWidget.buildMenu()
def setInput(self, **args):
"""Set the input values of the flowchart. This will automatically propagate
the new values throughout the flowchart, (possibly) causing the output to change.
"""
#print "setInput", args
#Node.setInput(self, **args)
#print " ....."
self.inputWasSet = True
self.inputNode.setOutput(**args)
def outputChanged(self):
## called when output of internal node has changed
vals = self.outputNode.inputValues()
self.widget().outputChanged(vals)
self.setOutput(**vals)
#self.sigOutputChanged.emit(self)
def output(self):
"""Return a dict of the values on the Flowchart's output terminals.
"""
return self.outputNode.inputValues()
def nodes(self):
return self._nodes
def addTerminal(self, name, **opts):
term = Node.addTerminal(self, name, **opts)
name = term.name()
if opts['io'] == 'in': ## inputs to the flowchart become outputs on the input node
opts['io'] = 'out'
opts['multi'] = False
self.inputNode.sigTerminalAdded.disconnect(self.internalTerminalAdded)
try:
self.inputNode.addTerminal(name, **opts)
finally:
self.inputNode.sigTerminalAdded.connect(self.internalTerminalAdded)
else:
opts['io'] = 'in'
#opts['multi'] = False
self.outputNode.sigTerminalAdded.disconnect(self.internalTerminalAdded)
try:
self.outputNode.addTerminal(name, **opts)
finally:
self.outputNode.sigTerminalAdded.connect(self.internalTerminalAdded)
return term
def removeTerminal(self, name):
#print "remove:", name
term = self[name]
inTerm = self.internalTerminal(term)
Node.removeTerminal(self, name)
inTerm.node().removeTerminal(inTerm.name())
def internalTerminalRenamed(self, term, oldName):
self[oldName].rename(term.name())
def internalTerminalAdded(self, node, term):
if term._io == 'in':
io = 'out'
else:
io = 'in'
Node.addTerminal(self, term.name(), io=io, renamable=term.isRenamable(), removable=term.isRemovable(), multiable=term.isMultiable())
def internalTerminalRemoved(self, node, term):
try:
Node.removeTerminal(self, term.name())
except KeyError:
pass
def terminalRenamed(self, term, oldName):
newName = term.name()
#print "flowchart rename", newName, oldName
#print self.terminals
Node.terminalRenamed(self, self[oldName], oldName)
#print self.terminals
for n in [self.inputNode, self.outputNode]:
if oldName in n.terminals:
n[oldName].rename(newName)
def createNode(self, nodeType, name=None, pos=None):
"""Create a new Node and add it to this flowchart.
"""
if name is None:
n = 0
while True:
name = "%s.%d" % (nodeType, n)
if name not in self._nodes:
break
n += 1
node = self.library.getNodeType(nodeType)(name)
self.addNode(node, name, pos)
return node
def addNode(self, node, name, pos=None):
"""Add an existing Node to this flowchart.
See also: createNode()
"""
if pos is None:
pos = [0, 0]
if type(pos) in [QtCore.QPoint, QtCore.QPointF]:
pos = [pos.x(), pos.y()]
item = node.graphicsItem()
item.setZValue(self.nextZVal*2)
self.nextZVal += 1
self.viewBox.addItem(item)
item.moveBy(*pos)
self._nodes[name] = node
if node is not self.inputNode and node is not self.outputNode:
self.widget().addNode(node)
node.sigClosed.connect(self.nodeClosed)
node.sigRenamed.connect(self.nodeRenamed)
node.sigOutputChanged.connect(self.nodeOutputChanged)
self.sigChartChanged.emit(self, 'add', node)
def removeNode(self, node):
"""Remove a Node from this flowchart.
"""
node.close()
def nodeClosed(self, node):
del self._nodes[node.name()]
self.widget().removeNode(node)
for signal, slot in [('sigClosed', self.nodeClosed),
('sigRenamed', self.nodeRenamed),
('sigOutputChanged', self.nodeOutputChanged)]:
try:
getattr(node, signal).disconnect(slot)
except (TypeError, RuntimeError):
pass
self.sigChartChanged.emit(self, 'remove', node)
def nodeRenamed(self, node, oldName):
del self._nodes[oldName]
self._nodes[node.name()] = node
if node is not self.inputNode and node is not self.outputNode:
self.widget().nodeRenamed(node, oldName)
self.sigChartChanged.emit(self, 'rename', node)
def arrangeNodes(self):
pass
def internalTerminal(self, term):
"""If the terminal belongs to the external Node, return the corresponding internal terminal"""
if term.node() is self:
if term.isInput():
return self.inputNode[term.name()]
else:
return self.outputNode[term.name()]
else:
return term
def connectTerminals(self, term1, term2):
"""Connect two terminals together within this flowchart."""
term1 = self.internalTerminal(term1)
term2 = self.internalTerminal(term2)
term1.connectTo(term2)
def process(self, **args):
"""
Process data through the flowchart, returning the output.
Keyword arguments must be the names of input terminals.
The return value is a dict with one key per output terminal.
"""
data = {} ## Stores terminal:value pairs
## determine order of operations
## order should look like [('p', node1), ('p', node2), ('d', terminal1), ...]
## Each tuple specifies either (p)rocess this node or (d)elete the result from this terminal
order = self.processOrder()
#print "ORDER:", order
## Record inputs given to process()
for n, t in self.inputNode.outputs().items():
# if n not in args:
# raise Exception("Parameter %s required to process this chart." % n)
if n in args:
data[t] = args[n]
ret = {}
## process all in order
for c, arg in order:
if c == 'p': ## Process a single node
#print "===> process:", arg
node = arg
if node is self.inputNode:
continue ## input node has already been processed.
## get input and output terminals for this node
outs = list(node.outputs().values())
ins = list(node.inputs().values())
## construct input value dictionary
args = {}
for inp in ins:
inputs = inp.inputTerminals()
if len(inputs) == 0:
continue
if inp.isMultiValue(): ## multi-input terminals require a dict of all inputs
args[inp.name()] = dict([(i, data[i]) for i in inputs if i in data])
else: ## single-inputs terminals only need the single input value available
args[inp.name()] = data[inputs[0]]
if node is self.outputNode:
ret = args ## we now have the return value, but must keep processing in case there are other endpoint nodes in the chart
else:
try:
if node.isBypassed():
result = node.processBypassed(args)
else:
result = node.process(display=False, **args)
except:
print("Error processing node %s. Args are: %s" % (str(node), str(args)))
raise
for out in outs:
#print " Output:", out, out.name()
#print out.name()
try:
data[out] = result[out.name()]
except KeyError:
pass
elif c == 'd': ## delete a terminal result (no longer needed; may be holding a lot of memory)
#print "===> delete", arg
if arg in data:
del data[arg]
return ret
def processOrder(self):
"""Return the order of operations required to process this chart.
The order returned should look like [('p', node1), ('p', node2), ('d', terminal1), ...]
where each tuple specifies either (p)rocess this node or (d)elete the result from this terminal
"""
## first collect list of nodes/terminals and their dependencies
deps = {}
tdeps = {} ## {terminal: [nodes that depend on terminal]}
for name, node in self._nodes.items():
deps[node] = node.dependentNodes()
for t in node.outputs().values():
tdeps[t] = t.dependentNodes()
#print "DEPS:", deps
## determine correct node-processing order
order = fn.toposort(deps)
#print "ORDER1:", order
## construct list of operations
ops = [('p', n) for n in order]
## determine when it is safe to delete terminal values
dels = []
for t, nodes in tdeps.items():
lastInd = 0
lastNode = None
for n in nodes: ## determine which node is the last to be processed according to order
if n is self:
lastInd = None
break
else:
try:
ind = order.index(n)
except ValueError:
continue
if lastNode is None or ind > lastInd:
lastNode = n
lastInd = ind
if lastInd is not None:
dels.append((lastInd+1, t))
dels.sort(key=lambda a: a[0], reverse=True)
for i, t in dels:
ops.insert(i, ('d', t))
return ops
def nodeOutputChanged(self, startNode):
    """Triggered when a node's output values have changed. (NOT called during process())
    Propagates new data forward through network."""
    ## first collect list of nodes/terminals and their dependencies
    if self.processing:
        # Re-entrancy guard: node.update() below may fire this handler
        # again; nested invocations are ignored.
        return
    self.processing = True
    try:
        deps = {}
        for name, node in self._nodes.items():
            deps[node] = []
            for t in node.outputs().values():
                deps[node].extend(t.dependentNodes())
        ## determine order of updates, starting from the changed node
        order = fn.toposort(deps, nodes=[startNode])
        order.reverse()
        ## keep track of terminals that have been updated
        terms = set(startNode.outputs().values())
        # Walk downstream nodes (skipping startNode itself at order[0]);
        # only update a node if at least one of its inputs actually changed.
        for node in order[1:]:
            update = False
            for term in list(node.inputs().values()):
                deps = list(term.connections().keys())
                for d in deps:
                    if d in terms:
                        update |= True
                        # Mark the input dirty without recursive processing.
                        term.inputChanged(d, process=False)
            if update:
                node.update()
                terms |= set(node.outputs().values())
    finally:
        self.processing = False
        if self.inputWasSet:
            self.inputWasSet = False
        else:
            self.sigStateChanged.emit()
def chartGraphicsItem(self):
    """Return the graphicsItem that displays the internal nodes and
    connections of this flowchart.
    Note that the similar method `graphicsItem()` is inherited from Node
    and returns the *external* graphical representation of this flowchart."""
    # self.viewBox is populated lazily by widget(); may be None before that.
    return self.viewBox
def widget(self):
    """Return the control widget for this flowchart.
    This widget provides GUI access to the parameters for each node and a
    graphical representation of the flowchart.
    """
    if self._widget is None:
        # Lazily construct the control widget on first access, and cache
        # the scene / viewBox it creates for use by other methods.
        self._widget = FlowchartCtrlWidget(self)
        self.scene = self._widget.scene()
        self.viewBox = self._widget.viewBox()
    return self._widget
def listConnections(self):
    """Return the set of (output terminal, connected terminal) pairs for
    every connection originating from a node output in this flowchart."""
    return {
        (term, other)
        for node in self._nodes.values()
        for term in node.outputs().values()
        for other in term.connections()
    }
def saveState(self):
    """Return a serializable data structure representing the current state of this flowchart.

    The returned dict extends Node.saveState() with:
      - 'nodes': one entry per node (class name, node name, position, state)
      - 'connects': (out_node, out_term, in_node, in_term) tuples
      - 'inputNode' / 'outputNode': state of the built-in boundary nodes
    """
    state = Node.saveState(self)
    state['nodes'] = []
    state['connects'] = []
    for name, node in self._nodes.items():
        cls = type(node)
        # Fall back to the class's own name when a node type does not
        # declare `nodeName`; previously clsName leaked from the prior
        # loop iteration (or raised NameError on the first one).
        clsName = getattr(cls, 'nodeName', cls.__name__)
        pos = node.graphicsItem().pos()
        ns = {'class': clsName, 'name': name, 'pos': (pos.x(), pos.y()), 'state': node.saveState()}
        state['nodes'].append(ns)
    conn = self.listConnections()
    for a, b in conn:
        state['connects'].append((a.node().name(), a.name(), b.node().name(), b.name()))
    state['inputNode'] = self.inputNode.saveState()
    state['outputNode'] = self.outputNode.saveState()
    return state
def restoreState(self, state, clear=False):
    """Restore the state of this flowchart from a previous call to `saveState()`.

    Signals are blocked while restoring so intermediate mutations do not
    fire handlers; sigChartLoaded / sigStateChanged are emitted once at the
    end. If ``clear`` is True, existing nodes are removed first.
    """
    self.blockSignals(True)
    try:
        if clear:
            self.clear()
        Node.restoreState(self, state)
        nodes = state['nodes']
        # Restore nodes left-to-right so layout rebuilds deterministically.
        nodes.sort(key=lambda a: a['pos'][0])
        for n in nodes:
            if n['name'] in self._nodes:
                # Node already exists (e.g. built-in input/output nodes);
                # just restore its state in place.
                self._nodes[n['name']].restoreState(n['state'])
                continue
            try:
                node = self.createNode(n['class'], name=n['name'])
                node.restoreState(n['state'])
            except Exception:
                # Narrowed from bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; restore is still
                # best-effort per node.
                printExc("Error creating node %s: (continuing anyway)" % n['name'])
        self.inputNode.restoreState(state.get('inputNode', {}))
        self.outputNode.restoreState(state.get('outputNode', {}))
        for n1, t1, n2, t2 in state['connects']:
            try:
                self.connectTerminals(self._nodes[n1][t1], self._nodes[n2][t2])
            except Exception:
                print(self._nodes[n1].terminals)
                print(self._nodes[n2].terminals)
                printExc("Error connecting terminals %s.%s - %s.%s:" % (n1, t1, n2, t2))
    finally:
        self.blockSignals(False)
    self.outputChanged()
    self.sigChartLoaded.emit()
    self.sigStateChanged.emit()
def loadFile(self, fileName=None, startDir=None):
    """Load a flowchart (``*.fc``) file.

    When ``fileName`` is None, open a file dialog and return immediately;
    the dialog's fileSelected signal re-invokes this method with the
    chosen path.
    """
    if fileName is None:
        if startDir is None:
            startDir = self.filePath
        if startDir is None:
            startDir = '.'
        self.fileDialog = FileDialog(None, "Load Flowchart..", startDir, "Flowchart (*.fc)")
        self.fileDialog.show()
        self.fileDialog.fileSelected.connect(self.loadFile)
        return
        ## NOTE: was previously using a real widget for the file dialog's parent, but this caused weird mouse event bugs..
    state = configfile.readConfigFile(fileName)
    self.restoreState(state, clear=True)
    self.viewBox.autoRange()
    self.sigFileLoaded.emit(fileName)
def saveFile(self, fileName=None, startDir=None, suggestedFileName='flowchart.fc'):
    """Save this flowchart to a .fc file

    When ``fileName`` is None, open a save dialog and return immediately;
    the dialog's fileSelected signal re-invokes this method with the
    chosen path.
    """
    if fileName is None:
        if startDir is None:
            startDir = self.filePath
        if startDir is None:
            startDir = '.'
        self.fileDialog = FileDialog(None, "Save Flowchart..", startDir, "Flowchart (*.fc)")
        self.fileDialog.setDefaultSuffix("fc")
        self.fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptMode.AcceptSave)
        self.fileDialog.show()
        self.fileDialog.fileSelected.connect(self.saveFile)
        return
    configfile.writeConfigFile(self.saveState(), fileName)
    self.sigFileSaved.emit(fileName)
def clear(self):
    """Remove all nodes from this flowchart except the original input/output nodes.
    """
    # Iterate over a copy: closing a node removes it from self._nodes.
    for n in list(self._nodes.values()):
        if n is self.inputNode or n is self.outputNode:
            continue
        n.close()  ## calls self.nodeClosed(n) by signal
    self.widget().clear()
def clearTerminals(self):
    """Remove all terminals from this flowchart, including those of the
    built-in input and output boundary nodes."""
    Node.clearTerminals(self)
    self.inputNode.clearTerminals()
    self.outputNode.clearTerminals()
|
Flowchart
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_pool.py
|
{
"start": 64761,
"end": 67319
}
|
class ____(PoolTestBase):
    """Tests for SingletonThreadPool: per-thread connection reuse, cleanup
    under concurrent checkouts, and nested-checkout rollback behavior."""

    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)

    # TODO: the SingletonThreadPool cleanup method
    # has an unfixed race condition within the "cleanup" system that
    # leads to this test being off by one connection under load; in any
    # case, this connection will be closed once it is garbage collected.
    # this pool is not a production-level pool and is only used for the
    # SQLite "memory" connection, and is not very useful under actual
    # multi-threaded conditions
    # @testing.requires.threading_with_mock
    # def test_cleanup_no_gc(self):
    #     self._test_cleanup(True)

    def _test_cleanup(self, strong_refs):
        """test that the pool's connections are OK after cleanup() has
        been called."""
        dbapi = MockDBAPI()
        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)

        if strong_refs:
            # Keep hard references to every DBAPI connection handed out so
            # we can later count which ones were actually closed.
            sr = set()

            def _conn():
                c = p.connect()
                sr.add(c.dbapi_connection)
                return c

        else:

            def _conn():
                return p.connect()

        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(0.01)

        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)

        # Pool should have trimmed back to approximately pool_size; "3 or 4"
        # accounts for the known off-by-one race documented above.
        lp = len(p._all_conns)
        is_true(3 <= lp <= 4)

        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)

    def test_no_rollback_from_nested_connections(self):
        """A nested checkout on the same thread returns the same connection;
        only the outermost close() triggers the rollback."""
        dbapi = MockDBAPI()
        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)

        c1 = p.connect()
        mock_conn = c1.dbapi_connection

        c2 = p.connect()
        is_(c1, c2)

        c2.close()

        eq_(mock_conn.mock_calls, [])

        c1.close()

        eq_(mock_conn.mock_calls, [call.rollback()])
|
SingletonThreadPoolTest
|
python
|
mlflow__mlflow
|
mlflow/server/handlers.py
|
{
"start": 8352,
"end": 149799
}
|
class ____(ModelRegistryStoreRegistry):
    """Model-registry store registry pre-wired with builders for every URI
    scheme the MLflow server supports: local files, SQL databases,
    Databricks, and Databricks Unity Catalog, plus any stores registered
    via entrypoints."""

    def __init__(self):
        super().__init__()
        # An empty scheme (bare path) is treated the same as "file".
        self.register("", self._get_file_store)
        self.register("file", self._get_file_store)
        for scheme in DATABASE_ENGINES:
            self.register(scheme, self._get_sqlalchemy_store)
        # Add support for Databricks registries
        self.register("databricks", self._get_databricks_rest_store)
        self.register("databricks-uc", self._get_databricks_uc_rest_store)
        self.register_entrypoints()

    @classmethod
    def _get_file_store(cls, store_uri):
        # Imported lazily to keep server startup light.
        from mlflow.store.model_registry.file_store import FileStore

        return FileStore(store_uri)

    @classmethod
    def _get_sqlalchemy_store(cls, store_uri):
        from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore

        return SqlAlchemyStore(store_uri)

    @classmethod
    def _get_databricks_rest_store(cls, store_uri):
        return ModelRegistryRestStore(partial(get_databricks_host_creds, store_uri))

    @classmethod
    def _get_databricks_uc_rest_store(cls, store_uri):
        from mlflow.environment_variables import MLFLOW_TRACKING_URI
        from mlflow.store._unity_catalog.registry.rest_store import UcModelRegistryStore

        # Get tracking URI from environment or use "databricks-uc" as default
        tracking_uri = MLFLOW_TRACKING_URI.get() or "databricks-uc"
        return UcModelRegistryStore(store_uri, tracking_uri)
# Module-level singleton registries shared by every request handler below.
_tracking_store_registry = TrackingStoreRegistryWrapper()
_model_registry_store_registry = ModelRegistryStoreRegistryWrapper()
def _get_artifact_repo_mlflow_artifacts():
    """
    Get an artifact repository specified by ``--artifacts-destination`` option for ``mlflow server``
    command.
    """
    from mlflow.server import ARTIFACTS_DESTINATION_ENV_VAR

    global _artifact_repo
    # Cached in a module-level global: the destination is fixed for the
    # lifetime of the server process.
    if _artifact_repo is None:
        _artifact_repo = get_artifact_repository(os.environ[ARTIFACTS_DESTINATION_ENV_VAR])
    return _artifact_repo
def _get_trace_artifact_repo(trace_info: TraceInfo):
    """
    Resolve the artifact repository for fetching data for the given trace.

    Args:
        trace_info: The trace info object containing metadata about the trace.

    Raises:
        MlflowException: if a proxied trace artifact URI carries no subpath.
    """
    artifact_uri = get_artifact_uri_for_trace(trace_info)
    if _is_servable_proxied_run_artifact_root(artifact_uri):
        # If the artifact location is a proxied run artifact root (e.g. mlflow-artifacts://...),
        # we need to resolve it to the actual artifact location.
        from mlflow.server import ARTIFACTS_DESTINATION_ENV_VAR

        path = _get_proxied_run_artifact_destination_path(artifact_uri)
        if not path:
            # Bug fix: the two message sentences were separated by a comma,
            # making the second one a positional argument that collided with
            # the `error_code` keyword (TypeError at raise time). They are
            # now concatenated into a single message string.
            raise MlflowException(
                f"Failed to resolve the proxied run artifact URI: {artifact_uri}. "
                "Trace artifact URI must contain subpath to the trace data directory.",
                error_code=BAD_REQUEST,
            )
        root = os.environ[ARTIFACTS_DESTINATION_ENV_VAR]
        artifact_uri = posixpath.join(root, path)
        # We don't set it to global var unlike run artifact, because the artifact repo has
        # to be created with full trace artifact URI including request_id.
        # e.g. s3://<experiment_id>/traces/<request_id>
        artifact_repo = get_artifact_repository(artifact_uri)
    else:
        artifact_repo = get_artifact_repository(artifact_uri)
    return artifact_repo
def _is_serving_proxied_artifacts():
    """
    Returns:
        True if the MLflow server is serving proxied artifacts (i.e. acting as a proxy for
        artifact upload / download / list operations), as would be enabled by specifying the
        --serve-artifacts configuration option. False otherwise.
    """
    from mlflow.server import SERVE_ARTIFACTS_ENV_VAR

    flag = os.environ.get(SERVE_ARTIFACTS_ENV_VAR, "false")
    return flag == "true"
def _is_servable_proxied_run_artifact_root(run_artifact_root):
    """
    Determines whether or not the following are true:

    - The specified Run artifact root is a proxied artifact root (i.e. an artifact root with scheme
      ``http``, ``https``, or ``mlflow-artifacts``).
    - The MLflow server is capable of resolving and accessing the underlying storage location
      corresponding to the proxied artifact root, allowing it to fulfill artifact list and
      download requests by using this storage location directly.

    Args:
        run_artifact_root: The Run artifact root location (URI).

    Returns:
        True if the specified Run artifact root refers to proxied artifacts that can be
        served by this MLflow server. False otherwise.
    """
    # NB: When serving is enabled, the server *assumes* it has access to the
    # storage behind any proxied root. That assumption can be wrong: e.g. an
    # org may later move artifacts to a new location B served by a separate
    # `mlflow server --artifacts-only` process. Requests for Location B sent
    # to the original server then fail until that server is granted access
    # to Location B as well.
    if not _is_serving_proxied_artifacts():
        return False
    scheme = urllib.parse.urlparse(run_artifact_root).scheme
    return scheme in ("http", "https", "mlflow-artifacts")
def _get_proxied_run_artifact_destination_path(proxied_artifact_root, relative_path=None):
"""
Resolves the specified proxied artifact location within a Run to a concrete storage location.
Args:
proxied_artifact_root: The Run artifact root location (URI) with scheme ``http``,
``https``, or `mlflow-artifacts` that can be resolved by the MLflow server to a
concrete storage location.
relative_path: The relative path of the destination within the specified
``proxied_artifact_root``. If ``None``, the destination is assumed to be
the resolved ``proxied_artifact_root``.
Returns:
The storage location of the specified artifact.
"""
parsed_proxied_artifact_root = urllib.parse.urlparse(proxied_artifact_root)
assert parsed_proxied_artifact_root.scheme in ["http", "https", "mlflow-artifacts"]
if parsed_proxied_artifact_root.scheme == "mlflow-artifacts":
# If the proxied artifact root is an `mlflow-artifacts` URI, the run artifact root path is
# simply the path component of the URI, since the fully-qualified format of an
# `mlflow-artifacts` URI is `mlflow-artifacts://<netloc>/path/to/artifact`
proxied_run_artifact_root_path = parsed_proxied_artifact_root.path.lstrip("/")
else:
# In this case, the proxied artifact root is an HTTP(S) URL referring to an mlflow-artifacts
# API route that can be used to download the artifact. These routes are always anchored at
# `/api/2.0/mlflow-artifacts/artifacts`. Accordingly, we split the path on this route anchor
# and interpret the rest of the path (everything after the route anchor) as the run artifact
# root path
mlflow_artifacts_http_route_anchor = "/api/2.0/mlflow-artifacts/artifacts/"
assert mlflow_artifacts_http_route_anchor in parsed_proxied_artifact_root.path
proxied_run_artifact_root_path = parsed_proxied_artifact_root.path.split(
mlflow_artifacts_http_route_anchor
)[1].lstrip("/")
return (
posixpath.join(proxied_run_artifact_root_path, relative_path)
if relative_path is not None
else proxied_run_artifact_root_path
)
def _get_tracking_store(
    backend_store_uri: str | None = None,
    default_artifact_root: str | None = None,
) -> AbstractTrackingStore:
    """Return the process-wide tracking store, creating it on first call.

    Explicit arguments take precedence over the corresponding environment
    variables; later calls ignore the arguments and return the cached store.
    """
    from mlflow.server import ARTIFACT_ROOT_ENV_VAR, BACKEND_STORE_URI_ENV_VAR

    global _tracking_store
    if _tracking_store is None:
        store_uri = backend_store_uri or os.environ.get(BACKEND_STORE_URI_ENV_VAR, None)
        artifact_root = default_artifact_root or os.environ.get(ARTIFACT_ROOT_ENV_VAR, None)
        _tracking_store = _tracking_store_registry.get_store(store_uri, artifact_root)
        # Keep the client-side tracking URI in sync with the server store.
        utils.set_tracking_uri(store_uri)
    return _tracking_store
def _get_model_registry_store(registry_store_uri: str | None = None) -> AbstractModelRegistryStore:
    """Return the process-wide model registry store, creating it on first call.

    URI resolution order: explicit argument, registry-store env var, then the
    backend-store env var as a fallback (registry co-located with tracking).
    """
    from mlflow.server import BACKEND_STORE_URI_ENV_VAR, REGISTRY_STORE_URI_ENV_VAR

    global _model_registry_store
    if _model_registry_store is None:
        store_uri = (
            registry_store_uri
            or os.environ.get(REGISTRY_STORE_URI_ENV_VAR, None)
            or os.environ.get(BACKEND_STORE_URI_ENV_VAR, None)
        )
        _model_registry_store = _model_registry_store_registry.get_store(store_uri)
        registry_utils.set_registry_uri(store_uri)
    return _model_registry_store
def _get_job_store(backend_store_uri: str | None = None) -> AbstractJobStore:
    """
    Get a job store instance based on the backend store URI.

    Args:
        backend_store_uri: Optional backend store URI. If not provided,
            uses environment variable.

    Returns:
        An instance of AbstractJobStore

    Raises:
        ValueError: if the resolved store URI is not a database URI
            (the job store has no file-based implementation).
    """
    from mlflow.server import BACKEND_STORE_URI_ENV_VAR
    from mlflow.store.jobs.sqlalchemy_store import SqlAlchemyJobStore
    from mlflow.utils.uri import extract_db_type_from_uri

    global _job_store
    if _job_store is None:
        store_uri = backend_store_uri or os.environ.get(BACKEND_STORE_URI_ENV_VAR, None)
        try:
            # Used purely as validation that the URI names a supported DB.
            extract_db_type_from_uri(store_uri)
        except MlflowException:
            # Require a database backend URI for the job store
            raise ValueError("Job store requires a database backend URI")
        _job_store = SqlAlchemyJobStore(store_uri)
    return _job_store
def initialize_backend_stores(
    backend_store_uri: str | None = None,
    registry_store_uri: str | None = None,
    default_artifact_root: str | None = None,
) -> None:
    """Eagerly construct the tracking and model-registry stores at server
    startup so configuration errors surface immediately. An unsupported
    registry URI is tolerated: the server can run without a registry."""
    _get_tracking_store(backend_store_uri, default_artifact_root)
    try:
        _get_model_registry_store(registry_store_uri)
    except UnsupportedModelRegistryStoreURIException:
        pass
def _assert_string(x):
    # Validator: x must be a str. Raises a bare AssertionError (no message)
    # so _validate_param_against_schema substitutes its own error text.
    assert isinstance(x, str)
def _assert_intlike(x):
try:
x = int(x)
except ValueError:
pass
assert isinstance(x, int)
def _assert_bool(x):
    # Validator: x must be a bool. Raises a bare AssertionError on failure.
    assert isinstance(x, bool)
def _assert_floatlike(x):
try:
x = float(x)
except ValueError:
pass
assert isinstance(x, float)
def _assert_array(x):
    # Validator: x must be a list. Raises a bare AssertionError on failure.
    assert isinstance(x, list)
def _assert_map_key_present(x):
    # Validator: x must be a list of dict-like entries, each with a
    # non-empty "key" field (e.g. a list of tag objects).
    _assert_array(x)
    for entry in x:
        _assert_required(entry.get("key"))
def _assert_required(x, path=None):
    # Validator: x must be present (not None and not ""). When `path` is
    # given, the AssertionError carries a missing_value(path) message;
    # otherwise it is bare and the schema machinery supplies the text.
    if path is None:
        assert x is not None
        # When parsing JSON payloads via proto, absent string fields
        # are expressed as empty strings
        assert x != ""
    else:
        assert x is not None, missing_value(path)
        assert x != "", missing_value(path)
def _assert_less_than_or_equal(x, max_value, message=None):
if x > max_value:
raise AssertionError(message) if message else AssertionError()
def _assert_intlike_within_range(x, min_value, max_value, message=None):
if not min_value <= x <= max_value:
raise AssertionError(message) if message else AssertionError()
def _assert_item_type_string(x):
assert all(isinstance(item, str) for item in x)
# Validators that only check a value's *type*. These are skipped by
# _validate_param_against_schema when protobuf parsing already succeeded
# (in which case field types are known to be correct).
_TYPE_VALIDATORS = {
    _assert_intlike,
    _assert_string,
    _assert_bool,
    _assert_floatlike,
    _assert_array,
    _assert_item_type_string,
}
def _validate_param_against_schema(schema, param, value, proto_parsing_succeeded=False):
    """
    Attempts to validate a single parameter against a specified schema. Examples of the elements of
    the schema are type assertions and checks for required parameters. Returns None on validation
    success. Otherwise, raises an MlflowException if an assertion fails. This method is intended
    to be called for side effects.

    Args:
        schema: A list of functions to validate the parameter against.
        param: The string name of the parameter being validated.
        value: The corresponding value of the `param` being validated.
        proto_parsing_succeeded: A boolean value indicating whether proto parsing succeeded.
            If the proto was successfully parsed, we assume all of the types of the parameters in
            the request body were correctly specified, and thus we skip validating types. If proto
            parsing failed, then we validate types in addition to the rest of the schema. For
            details, see https://github.com/mlflow/mlflow/pull/5458#issuecomment-1080880870.
    """
    for f in schema:
        if f in _TYPE_VALIDATORS and proto_parsing_succeeded:
            continue
        try:
            f(value)
        except AssertionError as e:
            # Message priority: one attached to the AssertionError itself,
            # then a "missing required" text, then a generic type hint.
            if e.args:
                message = e.args[0]
            elif f == _assert_required:
                message = f"Missing value for required parameter '{param}'."
            else:
                message = invalid_value(
                    param, value, f" Hint: Value was of type '{type(value).__name__}'."
                )
            raise MlflowException(
                message=(
                    message + " See the API docs for more information about request parameters."
                ),
                error_code=INVALID_PARAMETER_VALUE,
            )
    return None
def _get_request_json(flask_request=request):
    """Return the request body parsed as JSON, or None when the body is not
    valid JSON (silent=True). Rejects non-JSON content types first."""
    _validate_content_type(flask_request, ["application/json"])
    return flask_request.get_json(force=True, silent=True)
def _get_request_message(request_message, flask_request=request, schema=None):
    """Populate `request_message` (a protobuf message) from the Flask request
    and validate its fields against `schema` ({param_name: [validator_fns]}).

    Returns the populated message; raises MlflowException on validation
    failure.
    """
    if flask_request.method == "GET" and flask_request.args:
        # Convert atomic values of repeated fields to lists before calling protobuf deserialization.
        # Context: We parse the parameter string into a dictionary outside of protobuf since
        # protobuf does not know how to read the query parameters directly. The query parser above
        # has no type information and hence any parameter that occurs exactly once is parsed as an
        # atomic value. Since protobuf requires that the values of repeated fields are lists,
        # deserialization will fail unless we do the fix below.
        request_json = {}
        for field in request_message.DESCRIPTOR.fields:
            if field.name not in flask_request.args:
                continue
            # Use is_repeated property (preferred) with fallback to deprecated label
            try:
                is_repeated = field.is_repeated
            except AttributeError:
                is_repeated = field.label == descriptor.FieldDescriptor.LABEL_REPEATED
            if is_repeated:
                request_json[field.name] = flask_request.args.getlist(field.name)
            else:
                value = flask_request.args.get(field.name)
                # Query params arrive as strings; booleans must be parsed here.
                if field.type == descriptor.FieldDescriptor.TYPE_BOOL and isinstance(value, str):
                    if value.lower() not in ["true", "false"]:
                        raise MlflowException.invalid_parameter_value(
                            f"Invalid boolean value: {value}, must be 'true' or 'false'.",
                        )
                    value = value.lower() == "true"
                request_json[field.name] = value
    else:
        request_json = _get_request_json(flask_request)
        # Older clients may post their JSON double-encoded as strings, so the get_json
        # above actually converts it to a string. Therefore, we check this condition
        # (which we can tell for sure because any proper request should be a dictionary),
        # and decode it a second time.
        if is_string_type(request_json):
            request_json = json.loads(request_json)
    # If request doesn't have json body then assume it's empty.
    if request_json is None:
        request_json = {}
    proto_parsing_succeeded = True
    try:
        parse_dict(request_json, request_message)
    except ParseError:
        proto_parsing_succeeded = False
    schema = schema or {}
    for schema_key, schema_validation_fns in schema.items():
        if schema_key in request_json or _assert_required in schema_validation_fns:
            value = request_json.get(schema_key)
            # Legacy clients may send run_uuid in place of run_id.
            if schema_key == "run_id" and value is None and "run_uuid" in request_json:
                value = request_json.get("run_uuid")
            _validate_param_against_schema(
                schema=schema_validation_fns,
                param=schema_key,
                value=value,
                proto_parsing_succeeded=proto_parsing_succeeded,
            )
    return request_message
def _response_with_file_attachment_headers(file_path, response):
    """Set attachment/anti-sniffing headers on `response` for the file at
    `file_path` and return the response.

    Content-Disposition is only set when absent so a caller-provided value
    is preserved.
    """
    mime_type = _guess_mime_type(file_path)
    filename = pathlib.Path(file_path).name
    response.mimetype = mime_type
    content_disposition_header_name = "Content-Disposition"
    if content_disposition_header_name not in response.headers:
        # Bug fix: the header previously emitted the literal text
        # "filename=(unknown)" instead of interpolating the computed
        # filename (which was left unused).
        response.headers[content_disposition_header_name] = f"attachment; filename={filename}"
    # Prevent browsers from MIME-sniffing the attachment (XSS hardening).
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["Content-Type"] = mime_type
    return response
def _send_artifact(artifact_repository, path):
    """Download `path` from the artifact repository and return it as a Flask
    file-attachment response."""
    file_path = os.path.abspath(artifact_repository.download_artifacts(path))
    # Always send artifacts as attachments to prevent the browser from displaying them on our web
    # server's domain, which might enable XSS.
    mime_type = _guess_mime_type(file_path)
    file_sender_response = send_file(file_path, mimetype=mime_type, as_attachment=True)
    return _response_with_file_attachment_headers(file_path, file_sender_response)
def catch_mlflow_exception(func):
    """Decorator: convert an MlflowException raised by `func` into a JSON
    error response with the exception's HTTP status code, instead of a 500."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except MlflowException as e:
            response = Response(mimetype="application/json")
            response.set_data(e.serialize_as_json())
            response.status_code = e.get_http_status_code()
            return response

    return wrapper
def _disable_unless_serve_artifacts(func):
    """Decorator: return a 503 explaining how to enable artifact serving
    when the server was started without --serve-artifacts."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not _is_serving_proxied_artifacts():
            return Response(
                (
                    f"Endpoint: {request.url_rule} disabled due to the mlflow server running "
                    "with `--no-serve-artifacts`. To enable artifacts server functionality, "
                    "run `mlflow server` with `--serve-artifacts`"
                ),
                503,
            )
        return func(*args, **kwargs)

    return wrapper
def _disable_if_artifacts_only(func):
    """Decorator: return a 503 for tracking endpoints when the server runs
    in --artifacts-only mode (no tracking backend available)."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        from mlflow.server import ARTIFACTS_ONLY_ENV_VAR

        if os.environ.get(ARTIFACTS_ONLY_ENV_VAR):
            return Response(
                (
                    f"Endpoint: {request.url_rule} disabled due to the mlflow server running "
                    "in `--artifacts-only` mode. To enable tracking server functionality, run "
                    "`mlflow server` without `--artifacts-only`"
                ),
                503,
            )
        return func(*args, **kwargs)

    return wrapper
@catch_mlflow_exception
def get_artifact_handler():
    """Handler: stream a single run artifact (``?run_id=...&path=...``) as a
    file attachment, resolving proxied artifact roots when applicable."""
    run_id = request.args.get("run_id") or request.args.get("run_uuid")
    path = request.args["path"]
    # Reject path traversal before touching storage.
    path = validate_path_is_safe(path)
    run = _get_tracking_store().get_run(run_id)

    if _is_servable_proxied_run_artifact_root(run.info.artifact_uri):
        artifact_repo = _get_artifact_repo_mlflow_artifacts()
        artifact_path = _get_proxied_run_artifact_destination_path(
            proxied_artifact_root=run.info.artifact_uri,
            relative_path=path,
        )
    else:
        artifact_repo = _get_artifact_repo(run)
        artifact_path = path

    return _send_artifact(artifact_repo, artifact_path)
def _not_implemented():
    """Return an empty 404 response for endpoints that are not implemented."""
    response = Response()
    response.status_code = 404
    return response
# Tracking Server APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_experiment():
    """Handler for CreateExperiment: validate the request (including the
    artifact location URL), create the experiment, and return its id."""
    request_message = _get_request_message(
        CreateExperiment(),
        schema={
            "name": [_assert_required, _assert_string],
            "artifact_location": [_assert_string],
            "tags": [_assert_array],
        },
    )
    tags = [ExperimentTag(tag.key, tag.value) for tag in request_message.tags]

    # Validate query string in artifact location to prevent attacks
    parsed_artifact_location = urllib.parse.urlparse(request_message.artifact_location)
    if parsed_artifact_location.fragment or parsed_artifact_location.params:
        raise MlflowException(
            "'artifact_location' URL can't include fragments or params.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    validate_query_string(parsed_artifact_location.query)

    experiment_id = _get_tracking_store().create_experiment(
        request_message.name, request_message.artifact_location, tags
    )
    response_message = CreateExperiment.Response()
    response_message.experiment_id = experiment_id
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_experiment():
    """Handler for GetExperiment: fetch a single experiment by id."""
    request_message = _get_request_message(
        GetExperiment(), schema={"experiment_id": [_assert_required, _assert_string]}
    )
    response_message = get_experiment_impl(request_message)
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
def get_experiment_impl(request_message):
    """Build a GetExperiment.Response proto for the requested experiment id.
    Split out from the handler so it can be reused without Flask context."""
    response_message = GetExperiment.Response()
    experiment = _get_tracking_store().get_experiment(request_message.experiment_id).to_proto()
    response_message.experiment.MergeFrom(experiment)
    return response_message
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_experiment_by_name():
    """Handler for GetExperimentByName: look up an experiment by its name;
    404 (RESOURCE_DOES_NOT_EXIST) when no such experiment exists."""
    request_message = _get_request_message(
        GetExperimentByName(),
        schema={"experiment_name": [_assert_required, _assert_string]},
    )
    response_message = GetExperimentByName.Response()
    store_exp = _get_tracking_store().get_experiment_by_name(request_message.experiment_name)
    if store_exp is None:
        raise MlflowException(
            f"Could not find experiment with name '{request_message.experiment_name}'",
            error_code=RESOURCE_DOES_NOT_EXIST,
        )
    experiment = store_exp.to_proto()
    response_message.experiment.MergeFrom(experiment)
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_experiment():
    """Handler for DeleteExperiment: soft-delete an experiment by id."""
    request_message = _get_request_message(
        DeleteExperiment(), schema={"experiment_id": [_assert_required, _assert_string]}
    )
    _get_tracking_store().delete_experiment(request_message.experiment_id)
    response_message = DeleteExperiment.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _restore_experiment():
    """Handler for RestoreExperiment: undo a soft-delete of an experiment."""
    request_message = _get_request_message(
        RestoreExperiment(),
        schema={"experiment_id": [_assert_required, _assert_string]},
    )
    _get_tracking_store().restore_experiment(request_message.experiment_id)
    response_message = RestoreExperiment.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_experiment():
    """Handler for UpdateExperiment: rename an experiment."""
    request_message = _get_request_message(
        UpdateExperiment(),
        schema={
            "experiment_id": [_assert_required, _assert_string],
            # Consistency fix: run _assert_required before the type check
            # (matching every other schema in this module) so a missing
            # new_name reports "Missing value for required parameter"
            # rather than a type error.
            "new_name": [_assert_required, _assert_string],
        },
    )
    if request_message.new_name:
        _get_tracking_store().rename_experiment(
            request_message.experiment_id, request_message.new_name
        )
    response_message = UpdateExperiment.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_run():
    """Handler for CreateRun: create a run in the given experiment with the
    supplied tags, start time, and run name; returns the full run proto."""
    request_message = _get_request_message(
        CreateRun(),
        schema={
            "experiment_id": [_assert_string],
            "start_time": [_assert_intlike],
            "run_name": [_assert_string],
        },
    )

    tags = [RunTag(tag.key, tag.value) for tag in request_message.tags]
    run = _get_tracking_store().create_run(
        experiment_id=request_message.experiment_id,
        user_id=request_message.user_id,
        start_time=request_message.start_time,
        tags=tags,
        run_name=request_message.run_name,
    )

    response_message = CreateRun.Response()
    response_message.run.MergeFrom(run.to_proto())
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_run():
    """Handler for UpdateRun: update a run's status, end time, and/or name.
    Only fields actually present in the request (HasField) are changed."""
    request_message = _get_request_message(
        UpdateRun(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "end_time": [_assert_intlike],
            "status": [_assert_string],
            "run_name": [_assert_string],
        },
    )
    # Legacy clients may still send run_uuid.
    run_id = request_message.run_id or request_message.run_uuid
    run_name = request_message.run_name if request_message.HasField("run_name") else None
    end_time = request_message.end_time if request_message.HasField("end_time") else None
    status = request_message.status if request_message.HasField("status") else None
    updated_info = _get_tracking_store().update_run_info(run_id, status, end_time, run_name)
    response_message = UpdateRun.Response(run_info=updated_info.to_proto())
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_run():
    """Handler for DeleteRun: soft-delete a run by id."""
    request_message = _get_request_message(
        DeleteRun(), schema={"run_id": [_assert_required, _assert_string]}
    )
    _get_tracking_store().delete_run(request_message.run_id)
    response_message = DeleteRun.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _restore_run():
    """Handler for RestoreRun: undo a soft-delete of a run."""
    request_message = _get_request_message(
        RestoreRun(), schema={"run_id": [_assert_required, _assert_string]}
    )
    _get_tracking_store().restore_run(request_message.run_id)
    response_message = RestoreRun.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_metric():
    """Handler for LogMetric: record a single metric value (with timestamp,
    step, and optional model/dataset linkage) against a run."""
    request_message = _get_request_message(
        LogMetric(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
            "value": [_assert_required, _assert_floatlike],
            "timestamp": [_assert_intlike, _assert_required],
            "step": [_assert_intlike],
            "model_id": [_assert_string],
            "dataset_name": [_assert_string],
            "dataset_digest": [_assert_string],
        },
    )
    # Empty proto strings are normalized to None for the optional fields.
    metric = Metric(
        request_message.key,
        request_message.value,
        request_message.timestamp,
        request_message.step,
        request_message.model_id or None,
        request_message.dataset_name or None,
        request_message.dataset_digest or None,
        request_message.run_id or None,
    )
    # Legacy clients may still send run_uuid.
    run_id = request_message.run_id or request_message.run_uuid
    _get_tracking_store().log_metric(run_id, metric)
    response_message = LogMetric.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_param():
    """Handler for LogParam: record a single key/value parameter on a run."""
    request_message = _get_request_message(
        LogParam(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
            "value": [_assert_string],
        },
    )
    param = Param(request_message.key, request_message.value)
    # Legacy clients may still send run_uuid.
    run_id = request_message.run_id or request_message.run_uuid
    _get_tracking_store().log_param(run_id, param)
    response_message = LogParam.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_inputs():
    """Handle the LogInputs endpoint: attach dataset and model inputs to a run."""
    msg = _get_request_message(
        LogInputs(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "datasets": [_assert_array],
            "models": [_assert_array],
        },
    )
    dataset_inputs = [DatasetInput.from_proto(d) for d in msg.datasets]
    # An empty models list is forwarded as None rather than [].
    model_inputs = [LoggedModelInput.from_proto(m) for m in msg.models] or None
    _get_tracking_store().log_inputs(
        msg.run_id, datasets=dataset_inputs, models=model_inputs
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(LogInputs.Response()))
    return resp
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_outputs():
    """Handle the LogOutputs endpoint: attach logged-model outputs to a run."""
    msg = _get_request_message(
        LogOutputs(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "models": [_assert_required, _assert_array],
        },
    )
    outputs = [LoggedModelOutput.from_proto(proto) for proto in msg.models]
    _get_tracking_store().log_outputs(run_id=msg.run_id, models=outputs)
    return _wrap_response(LogOutputs.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_experiment_tag():
    """Handle the SetExperimentTag endpoint: set one tag on an experiment."""
    msg = _get_request_message(
        SetExperimentTag(),
        schema={
            "experiment_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
            "value": [_assert_string],
        },
    )
    _get_tracking_store().set_experiment_tag(
        msg.experiment_id, ExperimentTag(msg.key, msg.value)
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(SetExperimentTag.Response()))
    return resp
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_experiment_tag():
    """Handle the DeleteExperimentTag endpoint: remove one tag from an experiment."""
    msg = _get_request_message(
        DeleteExperimentTag(),
        schema={
            "experiment_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
        },
    )
    _get_tracking_store().delete_experiment_tag(msg.experiment_id, msg.key)
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(DeleteExperimentTag.Response()))
    return resp
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_tag():
    """Handle the SetTag endpoint: set one tag on a run."""
    msg = _get_request_message(
        SetTag(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
            "value": [_assert_string],
        },
    )
    # run_uuid is the older spelling of run_id; accept either.
    target_run = msg.run_id or msg.run_uuid
    _get_tracking_store().set_tag(target_run, RunTag(msg.key, msg.value))
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(SetTag.Response()))
    return resp
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_tag():
    """Handle the DeleteTag endpoint: remove one tag from a run."""
    msg = _get_request_message(
        DeleteTag(),
        schema={
            "run_id": [_assert_required, _assert_string],
            "key": [_assert_required, _assert_string],
        },
    )
    _get_tracking_store().delete_tag(msg.run_id, msg.key)
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(DeleteTag.Response()))
    return resp
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_run():
    """Handle the GetRun endpoint: return a run's proto representation as JSON."""
    msg = _get_request_message(
        GetRun(), schema={"run_id": [_assert_required, _assert_string]}
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(get_run_impl(msg)))
    return resp
def get_run_impl(request_message):
    """Fetch a run by id (falling back to the older ``run_uuid`` field) as a GetRun response."""
    run_id = request_message.run_id or request_message.run_uuid
    run = _get_tracking_store().get_run(run_id)
    reply = GetRun.Response()
    reply.run.MergeFrom(run.to_proto())
    return reply
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_runs():
    """Handle the SearchRuns endpoint: validate the request and delegate to search_runs_impl."""
    msg = _get_request_message(
        SearchRuns(),
        schema={
            "experiment_ids": [_assert_array],
            "filter": [_assert_string],
            "max_results": [
                _assert_intlike,
                # Server-side cap on page size.
                lambda x: _assert_less_than_or_equal(int(x), 50000),
            ],
            "order_by": [_assert_array, _assert_item_type_string],
        },
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(search_runs_impl(msg)))
    return resp
def search_runs_impl(request_message):
    """Execute a SearchRuns request against the tracking store.

    Returns a ``SearchRuns.Response`` proto containing the matching runs and,
    when the store reports another page, a ``next_page_token``.
    """
    response_message = SearchRuns.Response()
    # Default to active runs when the client did not set run_view_type.
    run_view_type = ViewType.ACTIVE_ONLY
    if request_message.HasField("run_view_type"):
        run_view_type = ViewType.from_proto(request_message.run_view_type)
    filter_string = request_message.filter
    max_results = request_message.max_results
    experiment_ids = list(request_message.experiment_ids)
    # NB: Local import to avoid circular dependency (auth imports from handlers)
    # NOTE(review): only ImportError is caught below — if the auth module imports
    # but lacks ``auth_config``/``filter_experiment_ids``, an AttributeError will
    # propagate out of this handler. Confirm this is intended.
    try:
        from mlflow.server import auth
        if auth.auth_config:
            experiment_ids = auth.filter_experiment_ids(experiment_ids)
    except ImportError:
        # Auth module not available (Flask-WTF not installed), skip filtering
        pass
    order_by = request_message.order_by
    run_entities = _get_tracking_store().search_runs(
        experiment_ids=experiment_ids,
        filter_string=filter_string,
        run_view_type=run_view_type,
        max_results=max_results,
        order_by=order_by,
        page_token=request_message.page_token or None,
    )
    response_message.runs.extend([r.to_proto() for r in run_entities])
    # Forward the store's pagination token, if any.
    if run_entities.token:
        response_message.next_page_token = run_entities.token
    return response_message
@catch_mlflow_exception
@_disable_if_artifacts_only
def _list_artifacts():
    """Handle the ListArtifacts endpoint: validate the request and delegate to list_artifacts_impl."""
    msg = _get_request_message(
        ListArtifacts(),
        schema={
            "run_id": [_assert_string, _assert_required],
            "path": [_assert_string],
            "page_token": [_assert_string],
        },
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(list_artifacts_impl(msg)))
    return resp
def list_artifacts_impl(request_message):
    """Execute a ListArtifacts request: list files under ``path`` in a run's artifact root."""
    response_message = ListArtifacts.Response()
    if request_message.HasField("path"):
        path = request_message.path
        # Reject traversal/unsafe artifact paths before touching storage.
        path = validate_path_is_safe(path)
    else:
        path = None
    # run_uuid is the older spelling of run_id; accept either.
    run_id = request_message.run_id or request_message.run_uuid
    run = _get_tracking_store().get_run(run_id)
    if _is_servable_proxied_run_artifact_root(run.info.artifact_uri):
        # Artifact root is proxied through this server; list via the server-side
        # repository instead of treating the URI as a direct storage location.
        artifact_entities = _list_artifacts_for_proxied_run_artifact_root(
            proxied_artifact_root=run.info.artifact_uri,
            relative_path=path,
        )
    else:
        artifact_entities = _get_artifact_repo(run).list_artifacts(path)
    response_message.files.extend([a.to_proto() for a in artifact_entities])
    response_message.root_uri = run.info.artifact_uri
    return response_message
def _list_artifacts_for_proxied_run_artifact_root(proxied_artifact_root, relative_path=None):
    """
    Lists artifacts from the specified ``relative_path`` within the specified proxied Run artifact
    root (i.e. a Run artifact root with scheme ``http``, ``https``, or ``mlflow-artifacts``).

    Args:
        proxied_artifact_root: The Run artifact root location (URI) with scheme ``http``,
            ``https``, or ``mlflow-artifacts`` that can be resolved by the
            MLflow server to a concrete storage location.
        relative_path: The relative path within the specified ``proxied_artifact_root`` under
            which to list artifact contents. If ``None``, artifacts are listed from
            the ``proxied_artifact_root`` directory.

    Returns:
        A list of ``FileInfo`` entities whose paths are relative to the run's artifact root.
    """
    parsed_proxied_artifact_root = urllib.parse.urlparse(proxied_artifact_root)
    assert parsed_proxied_artifact_root.scheme in ["http", "https", "mlflow-artifacts"]
    artifact_destination_repo = _get_artifact_repo_mlflow_artifacts()
    artifact_destination_path = _get_proxied_run_artifact_destination_path(
        proxied_artifact_root=proxied_artifact_root,
        relative_path=relative_path,
    )
    artifact_entities = []
    for file_info in artifact_destination_repo.list_artifacts(artifact_destination_path):
        basename = posixpath.basename(file_info.path)
        # Re-root each listed entry so its path is relative to the run's artifact
        # root rather than the server-side destination directory.
        run_relative_artifact_path = (
            posixpath.join(relative_path, basename) if relative_path else basename
        )
        artifact_entities.append(
            FileInfo(run_relative_artifact_path, file_info.is_dir, file_info.file_size)
        )
    return artifact_entities
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_metric_history():
    """Handle the GetMetricHistory endpoint: return the logged values of one metric.

    Reads ``run_id`` (or the older ``run_uuid``), ``metric_key``, and optional
    ``max_results``/``page_token`` for pagination.
    """
    request_message = _get_request_message(
        GetMetricHistory(),
        schema={
            "run_id": [_assert_string, _assert_required],
            "metric_key": [_assert_string, _assert_required],
            "page_token": [_assert_string],
        },
    )
    response_message = GetMetricHistory.Response()
    run_id = request_message.run_id or request_message.run_uuid
    # BUG FIX: proto scalar fields are never None, so the previous
    # ``max_results if max_results is not None else None`` always forwarded the
    # field (0 when unset). Use HasField to distinguish "unset" from a real
    # value and pass None when the client did not request a limit.
    max_results = (
        request_message.max_results if request_message.HasField("max_results") else None
    )
    metric_entities = _get_tracking_store().get_metric_history(
        run_id,
        request_message.metric_key,
        max_results=max_results,
        page_token=request_message.page_token or None,
    )
    response_message.metrics.extend([m.to_proto() for m in metric_entities])
    # Set next_page_token if available
    if next_page_token := metric_entities.token:
        response_message.next_page_token = next_page_token
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def get_metric_history_bulk_handler():
    """Handle GetMetricHistoryBulk: return one metric's history across many runs.

    Query params: repeated ``run_id`` (1..100), required ``metric_key``, and an
    optional ``max_results`` cap (bounded at 25000). Returns a JSON dict with a
    ``metrics`` list of per-run metric entries.
    """
    MAX_HISTORY_RESULTS = 25000
    MAX_RUN_IDS_PER_REQUEST = 100
    # flat=False preserves every repeated ?run_id=... value, not just the first.
    run_ids = request.args.to_dict(flat=False).get("run_id", [])
    if not run_ids:
        raise MlflowException(
            message="GetMetricHistoryBulk request must specify at least one run_id.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    if len(run_ids) > MAX_RUN_IDS_PER_REQUEST:
        raise MlflowException(
            message=(
                f"GetMetricHistoryBulk request cannot specify more than {MAX_RUN_IDS_PER_REQUEST}"
                f" run_ids. Received {len(run_ids)} run_ids."
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    metric_key = request.args.get("metric_key")
    if metric_key is None:
        raise MlflowException(
            message="GetMetricHistoryBulk request must specify a metric_key.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    # Clamp the requested result count to the server-side maximum.
    max_results = int(request.args.get("max_results", MAX_HISTORY_RESULTS))
    max_results = min(max_results, MAX_HISTORY_RESULTS)
    store = _get_tracking_store()

    def _default_history_bulk_impl():
        # Fallback for stores without a native bulk API: query each run
        # individually and sort each run's metrics deterministically by
        # (timestamp, step, value).
        metrics_with_run_ids = []
        for run_id in sorted(run_ids):
            metrics_for_run = sorted(
                store.get_metric_history(
                    run_id=run_id,
                    metric_key=metric_key,
                    max_results=max_results,
                ),
                key=lambda metric: (metric.timestamp, metric.step, metric.value),
            )
            metrics_with_run_ids.extend(
                [
                    {
                        "key": metric.key,
                        "value": metric.value,
                        "timestamp": metric.timestamp,
                        "step": metric.step,
                        "run_id": run_id,
                    }
                    for metric in metrics_for_run
                ]
            )
        return metrics_with_run_ids

    # Prefer the store's optimized bulk implementation when it exists.
    if hasattr(store, "get_metric_history_bulk"):
        metrics_with_run_ids = [
            metric.to_dict()
            for metric in store.get_metric_history_bulk(
                run_ids=run_ids,
                metric_key=metric_key,
                max_results=max_results,
            )
        ]
    else:
        metrics_with_run_ids = _default_history_bulk_impl()

    # max_results bounds the combined result list, not a per-run page size.
    return {
        "metrics": metrics_with_run_ids[:max_results],
    }
@catch_mlflow_exception
@_disable_if_artifacts_only
def get_metric_history_bulk_interval_handler():
    """Handle GetMetricHistoryBulkInterval: validate the request and delegate to the impl."""
    request_message = _get_request_message(
        GetMetricHistoryBulkInterval(),
        schema={
            "run_ids": [
                _assert_required,
                _assert_array,
                _assert_item_type_string,
                # Cap the number of runs that may be queried in one request.
                lambda x: _assert_less_than_or_equal(
                    len(x),
                    MAX_RUNS_GET_METRIC_HISTORY_BULK,
                    message=f"GetMetricHistoryBulkInterval request must specify at most "
                    f"{MAX_RUNS_GET_METRIC_HISTORY_BULK} run_ids. Received {len(x)} run_ids.",
                ),
            ],
            "metric_key": [_assert_required, _assert_string],
            "start_step": [_assert_intlike],
            "end_step": [_assert_intlike],
            "max_results": [
                _assert_intlike,
                # Per-run result cap must be within [1, MAX_RESULTS_PER_RUN].
                lambda x: _assert_intlike_within_range(
                    int(x),
                    1,
                    MAX_RESULTS_PER_RUN,
                    message=f"max_results must be between 1 and {MAX_RESULTS_PER_RUN}.",
                ),
            ],
        },
    )
    response_message = get_metric_history_bulk_interval_impl(request_message)
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
def get_metric_history_bulk_interval_impl(request_message):
    """Execute a GetMetricHistoryBulkInterval request against the tracking store.

    ``start_step`` and ``end_step`` must be provided together, with
    ``start_step <= end_step``.
    """
    # NOTE(review): max_results/start_step/end_step are re-read from the raw
    # query string rather than from request_message — confirm both sources agree
    # for all supported request encodings.
    args = request.args
    run_ids = request_message.run_ids
    metric_key = request_message.metric_key
    max_results = int(args.get("max_results", MAX_RESULTS_PER_RUN))
    start_step = args.get("start_step")
    end_step = args.get("end_step")
    if start_step is not None and end_step is not None:
        start_step = int(start_step)
        end_step = int(end_step)
        if start_step > end_step:
            raise MlflowException.invalid_parameter_value(
                "end_step must be greater than start_step. "
                f"Found start_step={start_step} and end_step={end_step}."
            )
    elif start_step is not None or end_step is not None:
        # One bound without the other is ambiguous; reject it.
        raise MlflowException.invalid_parameter_value(
            "If either start step or end step are specified, both must be specified."
        )
    store = _get_tracking_store()
    metrics_with_run_ids = store.get_metric_history_bulk_interval(
        run_ids=run_ids,
        metric_key=metric_key,
        max_results=max_results,
        start_step=start_step,
        end_step=end_step,
    )
    response_message = GetMetricHistoryBulkInterval.Response()
    response_message.metrics.extend([m.to_proto() for m in metrics_with_run_ids])
    return response_message
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_datasets_handler():
    """Handle the SearchDatasets endpoint: delegate to search_datasets_impl."""
    msg = _get_request_message(
        SearchDatasets(),
    )
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(search_datasets_impl(msg)))
    return resp
def search_datasets_impl(request_message):
    """Execute a SearchDatasets request: summarize datasets across 1..20 experiments.

    Falls back to ``_not_implemented()`` when the store does not provide the
    private ``_search_datasets`` capability.
    """
    MAX_EXPERIMENT_IDS_PER_REQUEST = 20
    _validate_content_type(request, ["application/json"])
    experiment_ids = request_message.experiment_ids or []
    if not experiment_ids:
        raise MlflowException(
            message="SearchDatasets request must specify at least one experiment_id.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    if len(experiment_ids) > MAX_EXPERIMENT_IDS_PER_REQUEST:
        raise MlflowException(
            message=(
                f"SearchDatasets request cannot specify more than {MAX_EXPERIMENT_IDS_PER_REQUEST}"
                f" experiment_ids. Received {len(experiment_ids)} experiment_ids."
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    store = _get_tracking_store()
    # Dataset search is an optional, store-private capability.
    if hasattr(store, "_search_datasets"):
        response_message = SearchDatasets.Response()
        response_message.dataset_summaries.extend(
            [summary.to_proto() for summary in store._search_datasets(experiment_ids)]
        )
        return response_message
    else:
        return _not_implemented()
def _validate_gateway_path(method: str, gateway_path: str) -> None:
if not gateway_path:
raise MlflowException(
message="Deployments proxy request must specify a gateway_path.",
error_code=INVALID_PARAMETER_VALUE,
)
elif method == "GET":
if gateway_path.strip("/") != "api/2.0/endpoints":
raise MlflowException(
message=f"Invalid gateway_path: {gateway_path} for method: {method}",
error_code=INVALID_PARAMETER_VALUE,
)
elif method == "POST":
# For POST, gateway_path must be in the form of "gateway/{name}/invocations"
if not re.fullmatch(r"gateway/[^/]+/invocations", gateway_path.strip("/")):
raise MlflowException(
message=f"Invalid gateway_path: {gateway_path} for method: {method}",
error_code=INVALID_PARAMETER_VALUE,
)
@catch_mlflow_exception
def gateway_proxy_handler():
    """Proxy a UI request to the configured MLflow Deployments (gateway) server.

    Allowed targets are restricted by ``_validate_gateway_path``: GET may list
    endpoints, POST may invoke a gateway route.
    """
    target_uri = MLFLOW_DEPLOYMENTS_TARGET.get()
    if not target_uri:
        # Pretend an empty gateway service is running
        return {"endpoints": []}
    # GET parameters arrive in the query string; POST parameters in the JSON body.
    args = request.args if request.method == "GET" else request.json
    gateway_path = args.get("gateway_path")
    _validate_gateway_path(request.method, gateway_path)
    json_data = args.get("json_data", None)
    # NOTE(review): no timeout is set on this outbound request, so a hung
    # gateway can block this worker indefinitely — confirm whether one is wanted.
    response = requests.request(request.method, f"{target_uri}/{gateway_path}", json=json_data)
    if response.status_code == 200:
        return response.json()
    else:
        # NOTE(review): the upstream HTTP status is reused as the MlflowException
        # error_code — verify callers expect an HTTP code here.
        raise MlflowException(
            message=f"Deployments proxy request failed with error code {response.status_code}. "
            f"Error message: {response.text}",
            error_code=response.status_code,
        )
@catch_mlflow_exception
@_disable_if_artifacts_only
def create_promptlab_run_handler():
    """Create a Promptlab run from a JSON payload describing a prompt experiment.

    Required fields: experiment_id, prompt_template, prompt_parameters,
    model_route, model_input, mlflow_version. Optional: run_name, tags,
    model_parameters, model_output, model_output_parameters, user_id, start_time.
    """
    def assert_arg_exists(arg_name, arg):
        # Reject missing or empty required fields.
        if not arg:
            raise MlflowException(
                message=f"CreatePromptlabRun request must specify {arg_name}.",
                error_code=INVALID_PARAMETER_VALUE,
            )

    _validate_content_type(request, ["application/json"])
    args = request.json
    experiment_id = args.get("experiment_id")
    assert_arg_exists("experiment_id", experiment_id)
    run_name = args.get("run_name", None)
    tags = args.get("tags", [])
    prompt_template = args.get("prompt_template")
    assert_arg_exists("prompt_template", prompt_template)
    raw_prompt_parameters = args.get("prompt_parameters")
    assert_arg_exists("prompt_parameters", raw_prompt_parameters)
    # Each parameter list arrives as [{"key": ..., "value": ...}, ...] and is
    # converted to Param entities.
    prompt_parameters = [
        Param(param.get("key"), param.get("value")) for param in args.get("prompt_parameters")
    ]
    model_route = args.get("model_route")
    assert_arg_exists("model_route", model_route)
    raw_model_parameters = args.get("model_parameters", [])
    model_parameters = [
        Param(param.get("key"), param.get("value")) for param in raw_model_parameters
    ]
    model_input = args.get("model_input")
    assert_arg_exists("model_input", model_input)
    model_output = args.get("model_output", None)
    raw_model_output_parameters = args.get("model_output_parameters", [])
    model_output_parameters = [
        Param(param.get("key"), param.get("value")) for param in raw_model_output_parameters
    ]
    mlflow_version = args.get("mlflow_version")
    assert_arg_exists("mlflow_version", mlflow_version)
    user_id = args.get("user_id", "unknown")
    # use current time if not provided
    start_time = args.get("start_time", int(time.time() * 1000))
    store = _get_tracking_store()
    run = _create_promptlab_run_impl(
        store,
        experiment_id=experiment_id,
        run_name=run_name,
        tags=tags,
        prompt_template=prompt_template,
        prompt_parameters=prompt_parameters,
        model_route=model_route,
        model_parameters=model_parameters,
        model_input=model_input,
        model_output=model_output,
        model_output_parameters=model_output_parameters,
        mlflow_version=mlflow_version,
        user_id=user_id,
        start_time=start_time,
    )
    # Promptlab runs reuse the generic CreateRun response shape.
    response_message = CreateRun.Response()
    response_message.run.MergeFrom(run.to_proto())
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
def upload_artifact_handler():
    """Upload a small (max 10MB) artifact from the raw request body into a run's artifact store.

    Query params: ``run_uuid`` (required) and ``path`` (required, checked
    against path traversal). The request body is the file content.
    """
    args = request.args
    run_uuid = args.get("run_uuid")
    if not run_uuid:
        raise MlflowException(
            message="Request must specify run_uuid.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    path = args.get("path")
    if not path:
        raise MlflowException(
            message="Request must specify path.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    # Reject traversal/unsafe destination paths before any filesystem work.
    path = validate_path_is_safe(path)
    if request.content_length and request.content_length > 10 * 1024 * 1024:
        raise MlflowException(
            message="Artifact size is too large. Max size is 10MB.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    data = request.data
    if not data:
        raise MlflowException(
            message="Request must specify data.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    run = _get_tracking_store().get_run(run_uuid)
    artifact_dir = run.info.artifact_uri
    basename = posixpath.basename(path)
    dirname = posixpath.dirname(path)

    def _log_artifact_to_repo(file, run, dirname, artifact_dir):
        # Proxied artifact roots go through the server-side mlflow-artifacts
        # repository; direct roots use a repository for the run's artifact URI.
        if _is_servable_proxied_run_artifact_root(run.info.artifact_uri):
            artifact_repo = _get_artifact_repo_mlflow_artifacts()
            path_to_log = (
                os.path.join(run.info.experiment_id, run.info.run_id, "artifacts", dirname)
                if dirname
                else os.path.join(run.info.experiment_id, run.info.run_id, "artifacts")
            )
        else:
            artifact_repo = get_artifact_repository(artifact_dir)
            path_to_log = dirname
        artifact_repo.log_artifact(file, path_to_log)

    # Stage the upload in a temp directory mirroring the destination layout,
    # then hand the staged file to the artifact repository.
    with tempfile.TemporaryDirectory() as tmpdir:
        dir_path = os.path.join(tmpdir, dirname) if dirname else tmpdir
        file_path = os.path.join(dir_path, basename)
        os.makedirs(dir_path, exist_ok=True)
        with open(file_path, "wb") as f:
            f.write(data)
        _log_artifact_to_repo(file_path, run, dirname, artifact_dir)
    return Response(mimetype="application/json")
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_experiments():
    """Handle the SearchExperiments endpoint: paginated experiment search."""
    msg = _get_request_message(
        SearchExperiments(),
        schema={
            "view_type": [_assert_intlike],
            "max_results": [_assert_intlike],
            "order_by": [_assert_array],
            "filter": [_assert_string],
            "page_token": [_assert_string],
        },
    )
    experiments = _get_tracking_store().search_experiments(
        view_type=msg.view_type,
        max_results=msg.max_results,
        order_by=msg.order_by,
        filter_string=msg.filter,
        page_token=msg.page_token or None,
    )
    reply = SearchExperiments.Response()
    reply.experiments.extend([e.to_proto() for e in experiments])
    # Forward the store's pagination token, if any.
    if experiments.token:
        reply.next_page_token = experiments.token
    resp = Response(mimetype="application/json")
    resp.set_data(message_to_json(reply))
    return resp
@catch_mlflow_exception
def _get_artifact_repo(run):
    """Return an artifact repository rooted at the run's artifact URI."""
    artifact_uri = run.info.artifact_uri
    return get_artifact_repository(artifact_uri)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_batch():
    """Handle the LogBatch endpoint: log metrics, params, and tags in one call."""
    def _assert_metrics_fields_present(metrics):
        # Every metric entry needs key, value, and timestamp.
        for idx, m in enumerate(metrics):
            _assert_required(m.get("key"), path=f"metrics[{idx}].key")
            _assert_required(m.get("value"), path=f"metrics[{idx}].value")
            _assert_required(m.get("timestamp"), path=f"metrics[{idx}].timestamp")

    def _assert_params_fields_present(params):
        # Every param entry needs a key.
        for idx, param in enumerate(params):
            _assert_required(param.get("key"), path=f"params[{idx}].key")

    def _assert_tags_fields_present(tags):
        # Every tag entry needs a key.
        for idx, tag in enumerate(tags):
            _assert_required(tag.get("key"), path=f"tags[{idx}].key")

    # Validate the raw JSON payload before parsing it into protos.
    _validate_batch_log_api_req(_get_request_json())
    request_message = _get_request_message(
        LogBatch(),
        schema={
            "run_id": [_assert_string, _assert_required],
            "metrics": [_assert_array, _assert_metrics_fields_present],
            "params": [_assert_array, _assert_params_fields_present],
            "tags": [_assert_array, _assert_tags_fields_present],
        },
    )
    metrics = [Metric.from_proto(proto_metric) for proto_metric in request_message.metrics]
    params = [Param.from_proto(proto_param) for proto_param in request_message.params]
    tags = [RunTag.from_proto(proto_tag) for proto_tag in request_message.tags]
    _get_tracking_store().log_batch(
        run_id=request_message.run_id, metrics=metrics, params=params, tags=tags
    )
    response_message = LogBatch.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_model():
    """Handle the LogModel endpoint: record model metadata on a run.

    ``model_json`` must be a JSON object containing at least artifact_path,
    flavors, utc_time_created, and run_id.
    """
    request_message = _get_request_message(
        LogModel(),
        schema={
            "run_id": [_assert_string, _assert_required],
            "model_json": [_assert_string, _assert_required],
        },
    )
    try:
        model = json.loads(request_message.model_json)
    except Exception:
        raise MlflowException(
            f"Malformed model info. \n {request_message.model_json} \n is not a valid JSON.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    # Ensure the mandatory keys are present before building the Model entity.
    missing_fields = {"artifact_path", "flavors", "utc_time_created", "run_id"} - set(model.keys())
    if missing_fields:
        raise MlflowException(
            f"Model json is missing mandatory fields: {missing_fields}",
            error_code=INVALID_PARAMETER_VALUE,
        )
    _get_tracking_store().record_logged_model(
        run_id=request_message.run_id, mlflow_model=Model.from_dict(model)
    )
    response_message = LogModel.Response()
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
def _wrap_response(response_message):
    """Serialize a proto response message to JSON and wrap it in a Flask Response."""
    json_payload = message_to_json(response_message)
    wrapped = Response(mimetype="application/json")
    wrapped.set_data(json_payload)
    return wrapped
# Model Registry APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_registered_model():
    """Handle the CreateRegisteredModel endpoint.

    Creates the registered model (or prompt, distinguished by its tags) and
    delivers the corresponding created-webhook.
    """
    request_message = _get_request_message(
        CreateRegisteredModel(),
        schema={
            "name": [_assert_string, _assert_required],
            "tags": [_assert_array],
            "description": [_assert_string],
        },
    )
    store = _get_model_registry_store()
    registered_model = store.create_registered_model(
        name=request_message.name,
        tags=request_message.tags,
        description=request_message.description,
    )
    response_message = CreateRegisteredModel.Response(registered_model=registered_model.to_proto())
    # Determine if this is a prompt based on the tags
    if _is_prompt_request(request_message):
        # Send prompt creation webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.PROMPT, WebhookAction.CREATED),
            payload=PromptCreatedPayload(
                name=request_message.name,
                # Internal prompt-marker tags are excluded from the payload.
                tags={
                    t.key: t.value
                    for t in request_message.tags
                    if t.key not in {IS_PROMPT_TAG_KEY, PROMPT_TYPE_TAG_KEY}
                },
                description=request_message.description,
            ),
            store=store,
        )
    else:
        # Send regular model creation webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.REGISTERED_MODEL, WebhookAction.CREATED),
            payload=RegisteredModelCreatedPayload(
                name=request_message.name,
                tags={t.key: t.value for t in request_message.tags},
                description=request_message.description,
            ),
            store=store,
        )
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_registered_model():
    """Handle the GetRegisteredModel endpoint: look up one registered model by name."""
    msg = _get_request_message(
        GetRegisteredModel(), schema={"name": [_assert_string, _assert_required]}
    )
    model = _get_model_registry_store().get_registered_model(name=msg.name)
    return _wrap_response(
        GetRegisteredModel.Response(registered_model=model.to_proto())
    )
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_registered_model():
    """Handle the UpdateRegisteredModel endpoint: update a model's description."""
    msg = _get_request_message(
        UpdateRegisteredModel(),
        schema={
            "name": [_assert_string, _assert_required],
            "description": [_assert_string],
        },
    )
    updated = _get_model_registry_store().update_registered_model(
        name=msg.name, description=msg.description
    )
    return _wrap_response(
        UpdateRegisteredModel.Response(registered_model=updated.to_proto())
    )
@catch_mlflow_exception
@_disable_if_artifacts_only
def _rename_registered_model():
    """Handle the RenameRegisteredModel endpoint: rename a registered model."""
    msg = _get_request_message(
        RenameRegisteredModel(),
        schema={
            "name": [_assert_string, _assert_required],
            "new_name": [_assert_string, _assert_required],
        },
    )
    renamed = _get_model_registry_store().rename_registered_model(
        name=msg.name, new_name=msg.new_name
    )
    return _wrap_response(
        RenameRegisteredModel.Response(registered_model=renamed.to_proto())
    )
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_registered_model():
    """Handle the DeleteRegisteredModel endpoint: delete a registered model by name."""
    msg = _get_request_message(
        DeleteRegisteredModel(), schema={"name": [_assert_string, _assert_required]}
    )
    _get_model_registry_store().delete_registered_model(name=msg.name)
    return _wrap_response(DeleteRegisteredModel.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_registered_models():
    """Handle the SearchRegisteredModels endpoint: paginated registered-model search."""
    msg = _get_request_message(
        SearchRegisteredModels(),
        schema={
            "filter": [_assert_string],
            "max_results": [
                _assert_intlike,
                # Server-side cap on page size.
                lambda x: _assert_less_than_or_equal(int(x), 1000),
            ],
            "order_by": [_assert_array, _assert_item_type_string],
            "page_token": [_assert_string],
        },
    )
    models = _get_model_registry_store().search_registered_models(
        filter_string=msg.filter,
        max_results=msg.max_results,
        order_by=msg.order_by,
        page_token=msg.page_token or None,
    )
    reply = SearchRegisteredModels.Response()
    reply.registered_models.extend([m.to_proto() for m in models])
    # Forward the store's pagination token, if any.
    if models.token:
        reply.next_page_token = models.token
    return _wrap_response(reply)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_latest_versions():
    """Handle the GetLatestVersions endpoint: the store's latest versions of a model,
    optionally filtered by the requested stages."""
    msg = _get_request_message(
        GetLatestVersions(),
        schema={
            "name": [_assert_string, _assert_required],
            "stages": [_assert_array, _assert_item_type_string],
        },
    )
    versions = _get_model_registry_store().get_latest_versions(
        name=msg.name, stages=msg.stages
    )
    reply = GetLatestVersions.Response()
    reply.model_versions.extend([v.to_proto() for v in versions])
    return _wrap_response(reply)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_registered_model_tag():
    """Handle the SetRegisteredModelTag endpoint.

    Sets the tag on the registered model and, when the model is a prompt,
    delivers a prompt-tag-set webhook.
    """
    request_message = _get_request_message(
        SetRegisteredModelTag(),
        schema={
            "name": [_assert_string, _assert_required],
            "key": [_assert_string, _assert_required],
            "value": [_assert_string],
        },
    )
    tag = RegisteredModelTag(key=request_message.key, value=request_message.value)
    store = _get_model_registry_store()
    store.set_registered_model_tag(name=request_message.name, tag=tag)
    if _is_prompt(request_message.name):
        # Send prompt tag set webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.PROMPT_TAG, WebhookAction.SET),
            payload=PromptTagSetPayload(
                name=request_message.name,
                key=request_message.key,
                value=request_message.value,
            ),
            store=store,
        )
    return _wrap_response(SetRegisteredModelTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_registered_model_tag():
    """Handle the DeleteRegisteredModelTag endpoint.

    Removes the tag from the registered model and, when the model is a prompt,
    delivers a prompt-tag-deleted webhook.
    """
    request_message = _get_request_message(
        DeleteRegisteredModelTag(),
        schema={
            "name": [_assert_string, _assert_required],
            "key": [_assert_string, _assert_required],
        },
    )
    store = _get_model_registry_store()
    store.delete_registered_model_tag(name=request_message.name, key=request_message.key)
    if _is_prompt(request_message.name):
        # Send prompt tag deleted webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.PROMPT_TAG, WebhookAction.DELETED),
            payload=PromptTagDeletedPayload(
                name=request_message.name,
                key=request_message.key,
            ),
            store=store,
        )
    return _wrap_response(DeleteRegisteredModelTag.Response())
def _validate_non_local_source_contains_relative_paths(source: str):
    """
    Validation check to ensure that sources that are provided that conform to the schemes:
    http, https, or mlflow-artifacts do not contain relative path designations that are intended
    to access local file system paths on the tracking server.

    Example paths that this validation function is intended to find and raise an Exception if
    passed:
    "mlflow-artifacts://host:port/../../../../"
    "http://host:port/api/2.0/mlflow-artifacts/artifacts/../../../../"
    "https://host:port/api/2.0/mlflow-artifacts/artifacts/../../../../"
    "/models/artifacts/../../../"
    "s3:/my_bucket/models/path/../../other/path"
    "file://path/to/../../../../some/where/you/should/not/be"
    "mlflow-artifacts://host:port/..%2f..%2f..%2f..%2f"
    "http://host:port/api/2.0/mlflow-artifacts/artifacts%00"
    """
    invalid_source_error_message = (
        f"Invalid model version source: '{source}'. If supplying a source as an http, https, "
        "local file path, ftp, objectstore, or mlflow-artifacts uri, an absolute path must be "
        "provided without relative path references present. "
        "Please provide an absolute path."
    )
    # Repeatedly percent-decode so nested encodings (e.g. "%252e%252e") cannot
    # hide ".." segments from the checks below.
    while (unquoted := urllib.parse.unquote_plus(source)) != source:
        source = unquoted
    # Normalize the URI's path component: collapse duplicate slashes and drop a
    # trailing slash.
    source_path = re.sub(r"/+", "/", urllib.parse.urlparse(source).path.rstrip("/"))
    # Reject NUL bytes in the path and any literal ".." segment anywhere in the
    # full source string (splitting the whole source is stricter than splitting
    # only its path component).
    if "\x00" in source_path or any(p == ".." for p in source.split("/")):
        raise MlflowException(invalid_source_error_message, INVALID_PARAMETER_VALUE)
    resolved_source = pathlib.Path(source_path).resolve().as_posix()
    # NB: drive split is specifically for Windows since WindowsPath.resolve() will append the
    # drive path of the pwd to a given path. We don't care about the drive here, though.
    _, resolved_path = os.path.splitdrive(resolved_source)
    # If resolving changed the path, a relative component survived — reject.
    if resolved_path != source_path:
        raise MlflowException(invalid_source_error_message, INVALID_PARAMETER_VALUE)
def _validate_source_run(source: str, run_id: str) -> None:
    """Validate that a model version ``source`` is safe to register.

    A local filesystem source is accepted only when ``run_id`` is supplied and
    the source resolves to a path inside that run's artifact directory. Any
    other source is screened for relative-path traversal.

    Raises:
        MlflowException: if a local source is not contained in the run's
            artifact directory, or a non-local source contains relative paths.
    """
    if is_local_uri(source):
        if run_id:
            run = _get_tracking_store().get_run(run_id)
            # Rebind `source` so the error message below reports the resolved path.
            source = pathlib.Path(local_file_uri_to_path(source)).resolve()
            if is_local_uri(run.info.artifact_uri):
                run_artifact_dir = pathlib.Path(
                    local_file_uri_to_path(run.info.artifact_uri)
                ).resolve()
                # Accept iff the source is the artifact dir itself or nested inside it.
                if run_artifact_dir == source or run_artifact_dir in source.parents:
                    return
        raise MlflowException(
            f"Invalid model version source: '{source}'. To use a local path as a model version "
            "source, the run_id request parameter has to be specified and the local path has to be "
            "contained within the artifact directory of the run specified by the run_id.",
            INVALID_PARAMETER_VALUE,
        )
    # Non-local sources: reject relative path references (a path-traversal threat).
    _validate_non_local_source_contains_relative_paths(source)
def _validate_source_model(source: str, model_id: str) -> None:
    """Validate that a model version ``source`` is safe to register.

    A local filesystem source is accepted only when ``model_id`` is supplied
    and the source resolves to a path inside that logged model's artifact
    location. Any other source is screened for relative-path traversal.

    Raises:
        MlflowException: if a local source is not contained in the model's
            artifact location, or a non-local source contains relative paths.
    """
    if is_local_uri(source):
        if model_id:
            store = _get_tracking_store()
            model = store.get_logged_model(model_id)
            # Rebind `source` so the error message below reports the resolved path.
            source = pathlib.Path(local_file_uri_to_path(source)).resolve()
            if is_local_uri(model.artifact_location):
                # Renamed from `run_artifact_dir`: this is the logged model's
                # artifact location, not a run's.
                model_artifact_dir = pathlib.Path(
                    local_file_uri_to_path(model.artifact_location)
                ).resolve()
                if model_artifact_dir in [source, *source.parents]:
                    return
        # Fixed copy-paste from `_validate_source_run`: the message previously
        # referred to "the run specified by the model_id".
        raise MlflowException(
            f"Invalid model version source: '{source}'. To use a local path as a model version "
            "source, the model_id request parameter has to be specified and the local path has "
            "to be contained within the artifact directory of the model specified by the "
            "model_id.",
            INVALID_PARAMETER_VALUE,
        )
    # Checks if relative paths are present in the source (a security threat). If any are present,
    # raises an Exception.
    _validate_non_local_source_contains_relative_paths(source)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_model_version():
    """Handle `POST /mlflow/model-versions/create`.

    Validates the request, checks the model version source for path-traversal
    attacks (skipped for prompts), creates the version in the registry store,
    links it to a logged model when `model_id` is given, and delivers a
    creation webhook (prompt- or model-version-flavored).
    """
    request_message = _get_request_message(
        CreateModelVersion(),
        schema={
            "name": [_assert_string, _assert_required],
            "source": [_assert_string, _assert_required],
            "run_id": [_assert_string],
            "tags": [_assert_array],
            "run_link": [_assert_string],
            "description": [_assert_string],
            "model_id": [_assert_string],
        },
    )
    # Optional server-side allow-list for sources, configured via environment.
    if request_message.source and (
        regex := MLFLOW_CREATE_MODEL_VERSION_SOURCE_VALIDATION_REGEX.get()
    ):
        if not re.search(regex, request_message.source):
            raise MlflowException(
                f"Invalid model version source: '{request_message.source}'.",
                error_code=INVALID_PARAMETER_VALUE,
            )
    # If the model version is a prompt, we don't validate the source
    is_prompt = _is_prompt_request(request_message)
    if not is_prompt:
        if request_message.model_id:
            _validate_source_model(request_message.source, request_message.model_id)
        else:
            _validate_source_run(request_message.source, request_message.run_id)
    store = _get_model_registry_store()
    model_version = store.create_model_version(
        name=request_message.name,
        source=request_message.source,
        run_id=request_message.run_id,
        run_link=request_message.run_link,
        tags=request_message.tags,
        description=request_message.description,
        model_id=request_message.model_id,
    )
    # Mirror the version's tags onto the logged model in the tracking store so
    # the two stay in sync (not applicable to prompts).
    if not is_prompt and request_message.model_id:
        tracking_store = _get_tracking_store()
        tracking_store.set_model_versions_tags(
            name=request_message.name,
            version=model_version.version,
            model_id=request_message.model_id,
        )
    response_message = CreateModelVersion.Response(model_version=model_version.to_proto())
    if is_prompt:
        # Convert tags to dict and extract template text efficiently
        tags_dict = {t.key: t.value for t in request_message.tags}
        template_text = tags_dict.pop(PROMPT_TEXT_TAG_KEY, None)
        # Remove internal prompt identification and type tags
        tags_dict.pop(IS_PROMPT_TAG_KEY, None)
        tags_dict.pop(PROMPT_TYPE_TAG_KEY, None)
        # Send prompt version creation webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.PROMPT_VERSION, WebhookAction.CREATED),
            payload=PromptVersionCreatedPayload(
                name=request_message.name,
                version=str(model_version.version),
                template=template_text,
                tags=tags_dict,
                description=request_message.description or None,
            ),
            store=store,
        )
    else:
        # Send regular model version creation webhook
        deliver_webhook(
            event=WebhookEvent(WebhookEntity.MODEL_VERSION, WebhookAction.CREATED),
            payload=ModelVersionCreatedPayload(
                name=request_message.name,
                version=str(model_version.version),
                source=request_message.source,
                run_id=request_message.run_id or None,
                tags={t.key: t.value for t in request_message.tags},
                description=request_message.description or None,
            ),
            store=store,
        )
    return _wrap_response(response_message)
def _is_prompt_request(request_message):
    """Return True when the request's tags mark the entity as a prompt."""
    for tag in request_message.tags:
        if tag.key == IS_PROMPT_TAG_KEY:
            return True
    return False
def _is_prompt(name: str) -> bool:
    """Return True when the registered model ``name`` is actually a prompt."""
    registered_model = _get_model_registry_store().get_registered_model(name=name)
    return registered_model._is_prompt()
@catch_mlflow_exception
@_disable_if_artifacts_only
def get_model_version_artifact_handler():
    """Serve one artifact file belonging to a model version.

    Resolves the version's download URI and dispatches to either the proxied
    mlflow-artifacts repository or a direct artifact repository.
    """
    name = request.args.get("name")
    version = request.args.get("version")
    # Reject path-traversal attempts before touching any repository.
    path = validate_path_is_safe(request.args["path"])
    artifact_uri = _get_model_registry_store().get_model_version_download_uri(name, version)
    if _is_servable_proxied_run_artifact_root(artifact_uri):
        repo = _get_artifact_repo_mlflow_artifacts()
        download_path = _get_proxied_run_artifact_destination_path(
            proxied_artifact_root=artifact_uri,
            relative_path=path,
        )
    else:
        repo = get_artifact_repository(artifact_uri)
        download_path = path
    return _send_artifact(repo, download_path)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_model_version():
    """Fetch a single model version by (name, version) and return its proto."""
    req = _get_request_message(
        GetModelVersion(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
        },
    )
    model_version = _get_model_registry_store().get_model_version(
        name=req.name, version=req.version
    )
    return _wrap_response(GetModelVersion.Response(model_version=model_version.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_model_version():
    """Update a model version's description.

    A description field that is absent from the request is passed as None,
    which the store treats as "leave unchanged".
    """
    req = _get_request_message(
        UpdateModelVersion(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
            "description": [_assert_string],
        },
    )
    # HasField distinguishes "not sent" from an explicit empty string.
    description = req.description if req.HasField("description") else None
    model_version = _get_model_registry_store().update_model_version(
        name=req.name,
        version=req.version,
        description=description,
    )
    return _wrap_response(UpdateModelVersion.Response(model_version=model_version.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _transition_stage():
    """Transition a model version to a new stage, optionally archiving others."""
    req = _get_request_message(
        TransitionModelVersionStage(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
            "stage": [_assert_string, _assert_required],
            "archive_existing_versions": [_assert_bool],
        },
    )
    updated = _get_model_registry_store().transition_model_version_stage(
        name=req.name,
        version=req.version,
        stage=req.stage,
        archive_existing_versions=req.archive_existing_versions,
    )
    response = TransitionModelVersionStage.Response(model_version=updated.to_proto())
    return _wrap_response(response)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_model_version():
    """Delete a single model version identified by (name, version)."""
    req = _get_request_message(
        DeleteModelVersion(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
        },
    )
    _get_model_registry_store().delete_model_version(name=req.name, version=req.version)
    return _wrap_response(DeleteModelVersion.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_model_version_download_uri():
    """Return the artifact download URI for a model version.

    Unlike the original, the request schema now requires `name` and `version`,
    consistent with every other model-version handler, instead of silently
    passing empty strings to the store.
    """
    request_message = _get_request_message(
        GetModelVersionDownloadUri(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
        },
    )
    download_uri = _get_model_registry_store().get_model_version_download_uri(
        name=request_message.name, version=request_message.version
    )
    response_message = GetModelVersionDownloadUri.Response(artifact_uri=download_uri)
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_model_versions():
    """Validate a model-version search request and delegate to the impl."""
    req = _get_request_message(
        SearchModelVersions(),
        schema={
            "filter": [_assert_string],
            "max_results": [
                _assert_intlike,
                # Cap page size to keep responses bounded.
                lambda x: _assert_less_than_or_equal(int(x), 200_000),
            ],
            "order_by": [_assert_array, _assert_item_type_string],
            "page_token": [_assert_string],
        },
    )
    return _wrap_response(search_model_versions_impl(req))
def search_model_versions_impl(request_message):
    """Run a model-version search against the registry store.

    Returns a `SearchModelVersions.Response` proto carrying the matching
    versions and, when more pages remain, a next-page token.
    """
    page = _get_model_registry_store().search_model_versions(
        filter_string=request_message.filter,
        max_results=request_message.max_results,
        order_by=request_message.order_by,
        page_token=request_message.page_token or None,
    )
    response = SearchModelVersions.Response()
    response.model_versions.extend(mv.to_proto() for mv in page)
    if page.token:
        response.next_page_token = page.token
    return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_model_version_tag():
    """Set a tag on a model version and deliver the matching webhook.

    Prompts and regular model versions share the same store call but emit
    differently-flavored webhook events.
    """
    req = _get_request_message(
        SetModelVersionTag(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
            "key": [_assert_string, _assert_required],
            "value": [_assert_string],
        },
    )
    store = _get_model_registry_store()
    store.set_model_version_tag(
        name=req.name,
        version=req.version,
        tag=ModelVersionTag(key=req.key, value=req.value),
    )
    # Pick the webhook flavor, then deliver once.
    if _is_prompt(req.name):
        event = WebhookEvent(WebhookEntity.PROMPT_VERSION_TAG, WebhookAction.SET)
        payload = PromptVersionTagSetPayload(
            name=req.name,
            version=req.version,
            key=req.key,
            value=req.value,
        )
    else:
        event = WebhookEvent(WebhookEntity.MODEL_VERSION_TAG, WebhookAction.SET)
        payload = ModelVersionTagSetPayload(
            name=req.name,
            version=req.version,
            key=req.key,
            value=req.value,
        )
    deliver_webhook(event=event, payload=payload, store=store)
    return _wrap_response(SetModelVersionTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_model_version_tag():
    """Delete a tag from a model version and deliver the matching webhook."""
    req = _get_request_message(
        DeleteModelVersionTag(),
        schema={
            "name": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
            "key": [_assert_string, _assert_required],
        },
    )
    store = _get_model_registry_store()
    store.delete_model_version_tag(
        name=req.name,
        version=req.version,
        key=req.key,
    )
    # Pick the webhook flavor (prompt vs. regular model version), then deliver.
    if _is_prompt(req.name):
        event = WebhookEvent(WebhookEntity.PROMPT_VERSION_TAG, WebhookAction.DELETED)
        payload = PromptVersionTagDeletedPayload(
            name=req.name,
            version=req.version,
            key=req.key,
        )
    else:
        event = WebhookEvent(WebhookEntity.MODEL_VERSION_TAG, WebhookAction.DELETED)
        payload = ModelVersionTagDeletedPayload(
            name=req.name,
            version=req.version,
            key=req.key,
        )
    deliver_webhook(event=event, payload=payload, store=store)
    return _wrap_response(DeleteModelVersionTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_registered_model_alias():
    """Point an alias at a model version and deliver the matching webhook."""
    req = _get_request_message(
        SetRegisteredModelAlias(),
        schema={
            "name": [_assert_string, _assert_required],
            "alias": [_assert_string, _assert_required],
            "version": [_assert_string, _assert_required],
        },
    )
    store = _get_model_registry_store()
    store.set_registered_model_alias(
        name=req.name,
        alias=req.alias,
        version=req.version,
    )
    # Pick the webhook flavor (prompt vs. regular model version), then deliver.
    if _is_prompt(req.name):
        event = WebhookEvent(WebhookEntity.PROMPT_ALIAS, WebhookAction.CREATED)
        payload = PromptAliasCreatedPayload(
            name=req.name,
            alias=req.alias,
            version=req.version,
        )
    else:
        event = WebhookEvent(WebhookEntity.MODEL_VERSION_ALIAS, WebhookAction.CREATED)
        payload = ModelVersionAliasCreatedPayload(
            name=req.name,
            alias=req.alias,
            version=req.version,
        )
    deliver_webhook(event=event, payload=payload, store=store)
    return _wrap_response(SetRegisteredModelAlias.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_registered_model_alias():
    """Remove an alias from a registered model and deliver the matching webhook."""
    req = _get_request_message(
        DeleteRegisteredModelAlias(),
        schema={
            "name": [_assert_string, _assert_required],
            "alias": [_assert_string, _assert_required],
        },
    )
    store = _get_model_registry_store()
    store.delete_registered_model_alias(name=req.name, alias=req.alias)
    # Pick the webhook flavor (prompt vs. regular model version), then deliver.
    if _is_prompt(req.name):
        event = WebhookEvent(WebhookEntity.PROMPT_ALIAS, WebhookAction.DELETED)
        payload = PromptAliasDeletedPayload(name=req.name, alias=req.alias)
    else:
        event = WebhookEvent(WebhookEntity.MODEL_VERSION_ALIAS, WebhookAction.DELETED)
        payload = ModelVersionAliasDeletedPayload(name=req.name, alias=req.alias)
    deliver_webhook(event=event, payload=payload, store=store)
    return _wrap_response(DeleteRegisteredModelAlias.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_model_version_by_alias():
    """Resolve an alias to its model version and return the version's proto."""
    req = _get_request_message(
        GetModelVersionByAlias(),
        schema={
            "name": [_assert_string, _assert_required],
            "alias": [_assert_string, _assert_required],
        },
    )
    model_version = _get_model_registry_store().get_model_version_by_alias(
        name=req.name, alias=req.alias
    )
    return _wrap_response(
        GetModelVersionByAlias.Response(model_version=model_version.to_proto())
    )
# Webhook APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_webhook():
    """Register a new webhook; empty optional fields are stored as None."""
    req = _get_request_message(
        CreateWebhook(),
        schema={
            "name": [_assert_string, _assert_required],
            "url": [_assert_string, _assert_required],
            "events": [_assert_array, _assert_required],
            "description": [_assert_string],
            "secret": [_assert_string],
            "status": [_assert_string],
        },
    )
    # Convert the proto enums up front; an unset status maps to None.
    events = [WebhookEvent.from_proto(e) for e in req.events]
    status = WebhookStatus.from_proto(req.status) if req.status else None
    webhook = _get_model_registry_store().create_webhook(
        name=req.name,
        url=req.url,
        events=events,
        description=req.description or None,
        secret=req.secret or None,
        status=status,
    )
    return _wrap_response(CreateWebhook.Response(webhook=webhook.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _list_webhooks():
    """List registered webhooks, one page at a time."""
    req = _get_request_message(
        ListWebhooks(),
        schema={
            "max_results": [_assert_intlike],
            "page_token": [_assert_string],
        },
    )
    page = _get_model_registry_store().list_webhooks(
        max_results=req.max_results,
        page_token=req.page_token or None,
    )
    response = ListWebhooks.Response(
        webhooks=[hook.to_proto() for hook in page],
        next_page_token=page.token,
    )
    return _wrap_response(response)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_webhook(webhook_id: str):
    """Fetch a single webhook by id and return its proto representation."""
    webhook = _get_model_registry_store().get_webhook(webhook_id=webhook_id)
    return _wrap_response(GetWebhook.Response(webhook=webhook.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_webhook(webhook_id: str):
    """Update a webhook; fields left unset in the request are passed as None
    so the store leaves them unchanged."""
    req = _get_request_message(
        UpdateWebhook(),
        schema={
            "name": [_assert_string],
            "description": [_assert_string],
            "url": [_assert_string],
            "events": [_assert_array],
            "secret": [_assert_string],
            "status": [_assert_string],
        },
    )
    # Pre-convert optional enum/list fields; empty means "no change".
    events = [WebhookEvent.from_proto(e) for e in req.events] if req.events else None
    status = WebhookStatus.from_proto(req.status) if req.status else None
    webhook = _get_model_registry_store().update_webhook(
        webhook_id=webhook_id,
        name=req.name or None,
        description=req.description or None,
        url=req.url or None,
        events=events,
        secret=req.secret or None,
        status=status,
    )
    return _wrap_response(UpdateWebhook.Response(webhook=webhook.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_webhook(webhook_id: str):
    """Delete the webhook identified by ``webhook_id``."""
    _get_model_registry_store().delete_webhook(webhook_id=webhook_id)
    return _wrap_response(DeleteWebhook.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _test_webhook(webhook_id: str):
    """Fire a test delivery at a webhook and report the outcome.

    An explicit event may be supplied in the request; otherwise None is passed
    and the tester picks a default.
    """
    req = _get_request_message(TestWebhook())
    event = WebhookEvent.from_proto(req.event) if req.HasField("event") else None
    store = _get_model_registry_store()
    webhook = store.get_webhook(webhook_id=webhook_id)
    outcome = test_webhook(webhook=webhook, event=event)
    return _wrap_response(TestWebhook.Response(result=outcome.to_proto()))
# MLflow Artifacts APIs
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _download_artifact(artifact_path):
    """
    A request handler for `GET /mlflow-artifacts/artifacts/<artifact_path>` to download an artifact
    from `artifact_path` (a relative path from the root artifact directory).

    The artifact is first materialized into a temporary directory, then streamed
    to the client from an open file handle.
    """
    artifact_path = validate_path_is_safe(artifact_path)
    tmp_dir = tempfile.TemporaryDirectory()
    artifact_repo = _get_artifact_repo_mlflow_artifacts()
    dst = artifact_repo.download_artifacts(artifact_path, tmp_dir.name)
    # Ref: https://stackoverflow.com/a/24613980/6943581
    file_handle = open(dst, "rb")  # noqa: SIM115

    def stream_and_remove_file():
        # try/finally guarantees the handle and temp dir are cleaned up even if
        # the client disconnects mid-stream (which closes the generator and
        # raises GeneratorExit at the yield); previously both leaked.
        try:
            yield from file_handle
        finally:
            file_handle.close()
            tmp_dir.cleanup()

    file_sender_response = current_app.response_class(stream_and_remove_file())
    return _response_with_file_attachment_headers(artifact_path, file_sender_response)
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _upload_artifact(artifact_path):
    """
    A request handler for `PUT /mlflow-artifacts/artifacts/<artifact_path>` to upload an artifact
    to `artifact_path` (a relative path from the root artifact directory).

    The request body is spooled to a temp file in 1 MB chunks, then logged to
    the artifact repository under the directory part of `artifact_path`.
    """
    artifact_path = validate_path_is_safe(artifact_path)
    dir_name, file_name = posixpath.split(artifact_path)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_path = os.path.join(tmp_dir, file_name)
        chunk_size = 1024 * 1024  # 1 MB
        with open(tmp_path, "wb") as f:
            # An empty read marks the end of the request stream.
            while chunk := request.stream.read(chunk_size):
                f.write(chunk)
        repo = _get_artifact_repo_mlflow_artifacts()
        repo.log_artifact(tmp_path, artifact_path=dir_name or None)
    return _wrap_response(UploadArtifact.Response())
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _list_artifacts_mlflow_artifacts():
    """
    A request handler for `GET /mlflow-artifacts/artifacts?path=<value>` to list artifacts in `path`
    (a relative path from the root artifact directory).
    """
    req = _get_request_message(ListArtifactsMlflowArtifacts())
    path = validate_path_is_safe(req.path) if req.HasField("path") else None
    repo = _get_artifact_repo_mlflow_artifacts()
    # Strip directory prefixes: clients expect entries named relative to `path`.
    entries = [
        FileInfo(posixpath.basename(info.path), info.is_dir, info.file_size).to_proto()
        for info in repo.list_artifacts(path)
    ]
    response_message = ListArtifacts.Response()
    response_message.files.extend(entries)
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(response_message))
    return response
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _delete_artifact_mlflow_artifacts(artifact_path):
    """
    A request handler for `DELETE /mlflow-artifacts/artifacts?path=<value>` to delete artifacts in
    `path` (a relative path from the root artifact directory).
    """
    artifact_path = validate_path_is_safe(artifact_path)
    # Parse (and thereby validate) the request body; its contents are unused.
    _get_request_message(DeleteArtifact())
    _get_artifact_repo_mlflow_artifacts().delete_artifacts(artifact_path)
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(DeleteArtifact.Response()))
    return response
@catch_mlflow_exception
def _graphql():
    """Execute a GraphQL request against the server schema.

    The query is parsed and screened by the safety checker first; unsafe
    queries short-circuit with the checker's result instead of executing.
    """
    from graphql import parse

    from mlflow.server.graphql.graphql_no_batching import check_query_safety
    from mlflow.server.graphql.graphql_schema_extensions import schema

    body = _get_request_json()
    query = body.get("query")
    if unsafe := check_query_safety(parse(query)):
        result = unsafe
    else:
        # Execute the GraphQL query using the Graphene schema.
        result = schema.execute(
            query,
            variables=body.get("variables"),
            operation_name=body.get("operationName"),
        )
    errors = [error.message for error in result.errors] if result.errors else None
    return jsonify({"data": result.data, "errors": errors})
def _validate_support_multipart_upload(artifact_repo):
    """Raise unless ``artifact_repo`` implements the multipart-upload API."""
    if isinstance(artifact_repo, MultipartUploadMixin):
        return
    raise _UnsupportedMultipartUploadException()
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _create_multipart_upload_artifact(artifact_path):
    """
    A request handler for `POST /mlflow-artifacts/mpu/create` to initiate a multipart upload
    under `artifact_path` (a relative path from the root artifact directory).
    """
    artifact_path = validate_path_is_safe(artifact_path)
    req = _get_request_message(
        CreateMultipartUpload(),
        schema={
            "path": [_assert_required, _assert_string],
            "num_parts": [_assert_intlike],
        },
    )
    repo = _get_artifact_repo_mlflow_artifacts()
    _validate_support_multipart_upload(repo)
    create_response = repo.create_multipart_upload(
        req.path,
        req.num_parts,
        artifact_path,
    )
    response = Response(mimetype="application/json")
    response.set_data(message_to_json(create_response.to_proto()))
    return response
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _complete_multipart_upload_artifact(artifact_path):
    """
    A request handler for `POST /mlflow-artifacts/mpu/complete` to finalize a multipart upload
    under `artifact_path` (a relative path from the root artifact directory).
    """
    artifact_path = validate_path_is_safe(artifact_path)
    req = _get_request_message(
        CompleteMultipartUpload(),
        schema={
            "path": [_assert_required, _assert_string],
            "upload_id": [_assert_string],
            "parts": [_assert_required],
        },
    )
    repo = _get_artifact_repo_mlflow_artifacts()
    _validate_support_multipart_upload(repo)
    uploaded_parts = [MultipartUploadPart.from_proto(part) for part in req.parts]
    repo.complete_multipart_upload(
        req.path,
        req.upload_id,
        uploaded_parts,
        artifact_path,
    )
    return _wrap_response(CompleteMultipartUpload.Response())
@catch_mlflow_exception
@_disable_unless_serve_artifacts
def _abort_multipart_upload_artifact(artifact_path):
    """
    A request handler for `POST /mlflow-artifacts/mpu/abort` to cancel an in-progress multipart
    upload under `artifact_path` (a relative path from the root artifact directory).
    """
    artifact_path = validate_path_is_safe(artifact_path)
    req = _get_request_message(
        AbortMultipartUpload(),
        schema={
            "path": [_assert_required, _assert_string],
            "upload_id": [_assert_string],
        },
    )
    repo = _get_artifact_repo_mlflow_artifacts()
    _validate_support_multipart_upload(repo)
    repo.abort_multipart_upload(
        req.path,
        req.upload_id,
        artifact_path,
    )
    return _wrap_response(AbortMultipartUpload.Response())
# MLflow Tracing APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def _start_trace_v3():
    """
    A request handler for `POST /mlflow/traces` to create a new TraceInfo record in tracking store.
    """
    req = _get_request_message(
        StartTraceV3(),
        schema={"trace": [_assert_required]},
    )
    # Round-trip through the entity so the store can assign ids/metadata.
    trace_info = _get_tracking_store().start_trace(
        TraceInfo.from_proto(req.trace.trace_info)
    )
    return _wrap_response(
        StartTraceV3.Response(trace=ProtoTrace(trace_info=trace_info.to_proto()))
    )
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_trace_info_v3(trace_id):
    """
    A request handler for `GET /mlflow/traces/{trace_id}/info` to retrieve
    an existing TraceInfo record from tracking store.
    """
    info = _get_tracking_store().get_trace_info(trace_id)
    return _wrap_response(
        GetTraceInfoV3.Response(trace=ProtoTrace(trace_info=info.to_proto()))
    )
@catch_mlflow_exception
@_disable_if_artifacts_only
def _batch_get_traces() -> Response:
    """
    A request handler for `GET /mlflow/traces/batchGet` to retrieve
    a batch of complete traces with spans for given trace ids.
    """
    req = _get_request_message(
        BatchGetTraces(),
        schema={"trace_ids": [_assert_array, _assert_required, _assert_item_type_string]},
    )
    traces = _get_tracking_store().batch_get_traces(req.trace_ids, None)
    response = BatchGetTraces.Response()
    response.traces.extend(trace.to_proto() for trace in traces)
    return _wrap_response(response)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_trace() -> Response:
    """
    A request handler for `GET /mlflow/traces/get` to fetch one trace (with spans) by id.
    `allow_partial` permits returning traces that are still being written.
    """
    req = _get_request_message(
        GetTrace(),
        schema={
            "trace_id": [_assert_string, _assert_required],
            "allow_partial": [_assert_bool],
        },
    )
    trace = _get_tracking_store().get_trace(req.trace_id, allow_partial=req.allow_partial)
    return _wrap_response(GetTrace.Response(trace=trace.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_traces_v3():
    """
    A request handler for `GET /mlflow/traces` to search for TraceInfo records in tracking store.
    """
    req = _get_request_message(
        SearchTracesV3(),
        schema={
            "locations": [_assert_array, _assert_required],
            "filter": [_assert_string],
            "max_results": [
                _assert_intlike,
                # Cap the page size.
                lambda x: _assert_less_than_or_equal(int(x), 500),
            ],
            "order_by": [_assert_array, _assert_item_type_string],
            "page_token": [_assert_string],
        },
    )
    # Only MLflow-experiment locations are supported; collect their ids.
    experiment_ids = []
    for location in req.locations:
        if location.HasField("mlflow_experiment"):
            experiment_ids.append(location.mlflow_experiment.experiment_id)
    traces, next_token = _get_tracking_store().search_traces(
        locations=experiment_ids,
        filter_string=req.filter,
        max_results=req.max_results,
        order_by=req.order_by,
        page_token=req.page_token or None,
    )
    response = SearchTracesV3.Response()
    response.traces.extend(trace.to_proto() for trace in traces)
    if next_token:
        response.next_page_token = next_token
    return _wrap_response(response)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_traces():
    """
    A request handler for `POST /mlflow/traces/delete-traces` to delete TraceInfo records
    from tracking store.
    """
    req = _get_request_message(
        DeleteTraces(),
        schema={
            "experiment_id": [_assert_string, _assert_required],
            "max_timestamp_millis": [_assert_intlike],
            "max_traces": [_assert_intlike],
            "request_ids": [_assert_array, _assert_item_type_string],
        },
    )

    # Proto accessors return the default (0) for unset optional ints, but here
    # "unset" and 0 mean opposite things (e.g. max_traces=0 would be "delete
    # nothing" while unset means "no limit"). Use HasField to map unset -> None.
    def _optional(field):
        return getattr(req, field) if req.HasField(field) else None

    deleted_count = _get_tracking_store().delete_traces(
        experiment_id=req.experiment_id,
        max_timestamp_millis=_optional("max_timestamp_millis"),
        max_traces=_optional("max_traces"),
        trace_ids=req.request_ids,
    )
    return _wrap_response(DeleteTraces.Response(traces_deleted=deleted_count))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _calculate_trace_filter_correlation():
    """
    A request handler for `POST /mlflow/traces/calculate-filter-correlation` to calculate
    NPMI correlation between two trace filter conditions.
    """
    req = _get_request_message(
        CalculateTraceFilterCorrelation(),
        schema={
            "experiment_ids": [_assert_array, _assert_required, _assert_item_type_string],
            "filter_string1": [_assert_string, _assert_required],
            "filter_string2": [_assert_string, _assert_required],
            "base_filter": [_assert_string],
        },
    )
    # An unset base_filter is forwarded as None rather than an empty string.
    base_filter = req.base_filter if req.HasField("base_filter") else None
    result = _get_tracking_store().calculate_trace_filter_correlation(
        experiment_ids=req.experiment_ids,
        filter_string1=req.filter_string1,
        filter_string2=req.filter_string2,
        base_filter=base_filter,
    )
    return _wrap_response(result.to_proto())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_trace_tag(request_id):
    """
    A request handler for `PATCH /mlflow/traces/{request_id}/tags` to set tags on a TraceInfo record
    """
    req = _get_request_message(
        SetTraceTag(),
        schema={
            "key": [_assert_string, _assert_required],
            "value": [_assert_string],
        },
    )
    _get_tracking_store().set_trace_tag(request_id, req.key, req.value)
    return _wrap_response(SetTraceTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_trace_tag_v3(trace_id):
    """
    A request handler for `PATCH /mlflow/traces/{trace_id}/tags` to set tags on a TraceInfo record.
    Identical to `_set_trace_tag`, but with request_id renamed to with trace_id.
    """
    req = _get_request_message(
        SetTraceTagV3(),
        schema={
            "key": [_assert_string, _assert_required],
            "value": [_assert_string],
        },
    )
    _get_tracking_store().set_trace_tag(trace_id, req.key, req.value)
    return _wrap_response(SetTraceTagV3.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_trace_tag(request_id):
    """
    A request handler for `DELETE /mlflow/traces/{request_id}/tags` to delete tags from a TraceInfo
    record.
    """
    req = _get_request_message(
        DeleteTraceTag(),
        schema={
            "key": [_assert_string, _assert_required],
        },
    )
    _get_tracking_store().delete_trace_tag(request_id, req.key)
    return _wrap_response(DeleteTraceTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_trace_tag_v3(trace_id):
    """
    A request handler for `DELETE /mlflow/traces/{trace_id}/tags` to delete tags
    from a TraceInfo record.
    Identical to `_delete_trace_tag`, but with request_id renamed to with trace_id.
    """
    req = _get_request_message(
        DeleteTraceTagV3(),
        schema={
            "key": [_assert_string, _assert_required],
        },
    )
    _get_tracking_store().delete_trace_tag(trace_id, req.key)
    return _wrap_response(DeleteTraceTagV3.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _link_traces_to_run():
    """
    A request handler for `POST /mlflow/traces/link-to-run` to link traces to a run.
    """
    req = _get_request_message(
        LinkTracesToRun(),
        schema={
            "trace_ids": [_assert_array, _assert_required, _assert_item_type_string],
            "run_id": [_assert_string, _assert_required],
        },
    )
    _get_tracking_store().link_traces_to_run(
        trace_ids=req.trace_ids,
        run_id=req.run_id,
    )
    return _wrap_response(LinkTracesToRun.Response())
def _fetch_trace_data_from_store(
    store: AbstractTrackingStore, request_id: str
) -> dict[str, Any] | None:
    """
    Try to load a trace's data directly from the tracking store.

    Attempts `store.get_trace` first and falls back to `store.batch_get_traces`
    when the former is not implemented. Returns the trace data as a dict, or
    None to signal that the caller should fall back to the artifact repository.
    """
    try:
        # allow partial so the frontend can render in-progress traces
        trace = store.get_trace(request_id, allow_partial=True)
        return trace.data.to_dict()
    except MlflowTracingException:
        # The store supports get_trace but could not produce this trace's data.
        return None
    except MlflowNotImplementedException:
        # fallback to batch_get_traces if get_trace is not implemented
        pass
    try:
        traces = store.batch_get_traces([request_id], None)
        match traces:
            case [trace]:
                return trace.data.to_dict()
            case _:
                # Batch lookup worked but returned no (or multiple) matches.
                raise MlflowException(
                    f"Trace with id={request_id} not found.",
                    error_code=RESOURCE_DOES_NOT_EXIST,
                )
    # For stores that don't support batch get traces, or if trace data is not in the store,
    # return None to signal fallback to artifact repository
    except (MlflowTracingException, MlflowNotImplementedException):
        return None
@catch_mlflow_exception
@_disable_if_artifacts_only
def get_trace_artifact_handler() -> Response:
    """
    Serve a trace's data as a downloadable JSON attachment.

    Requires a `request_id` query parameter. The data is read from the tracking
    store when available, otherwise downloaded from the trace's artifact
    repository.
    """
    request_id = request.args.get("request_id")
    if not request_id:
        raise MlflowException(
            'Request must include the "request_id" query parameter.',
            error_code=BAD_REQUEST,
        )
    store = _get_tracking_store()
    trace_data = _fetch_trace_data_from_store(store, request_id)
    if trace_data is None:
        # Store could not provide the data; fetch it from the artifact location
        # recorded on the trace info instead.
        trace_info = store.get_trace_info(request_id)
        trace_data = _get_trace_artifact_repo(trace_info).download_trace_data()
    # Write data to a BytesIO buffer instead of needing to save a temp file
    buf = io.BytesIO()
    buf.write(json.dumps(trace_data).encode())
    buf.seek(0)
    file_sender_response = send_file(
        buf,
        mimetype="application/octet-stream",
        as_attachment=True,
        download_name=TRACE_DATA_FILE_NAME,
    )
    return _response_with_file_attachment_headers(TRACE_DATA_FILE_NAME, file_sender_response)
# Assessments API handlers
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_assessment(trace_id):
    """
    A request handler for `POST /mlflow/traces/{assessment.trace_id}/assessments`
    to create a new assessment.

    Args:
        trace_id: ID of the trace the assessment is attached to, from the URL path.
    """
    request_message = _get_request_message(
        CreateAssessment(),
        schema={
            "assessment": [_assert_required],
        },
    )
    assessment = Assessment.from_proto(request_message.assessment)
    # The trace_id from the URL path is authoritative; it overwrites whatever
    # trace_id (if any) the request payload carried.
    assessment.trace_id = trace_id
    created_assessment = _get_tracking_store().create_assessment(assessment)
    response_message = CreateAssessment.Response(assessment=created_assessment.to_proto())
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_assessment(trace_id, assessment_id):
    """
    A request handler for `GET /mlflow/traces/{trace_id}/assessments/{assessment_id}`
    to fetch a single assessment from the tracking store.
    """
    store = _get_tracking_store()
    fetched = store.get_assessment(trace_id, assessment_id)
    return _wrap_response(GetAssessmentRequest.Response(assessment=fetched.to_proto()))
@catch_mlflow_exception
@_disable_if_artifacts_only
def _update_assessment(trace_id, assessment_id):
    """
    A request handler for `PATCH /mlflow/traces/{trace_id}/assessments/{assessment_id}`
    to update an assessment.

    Only the fields named in the request's `update_mask` are forwarded to the
    store; mask paths outside the known set are silently ignored.
    """
    request_message = _get_request_message(
        UpdateAssessment(),
        schema={
            "assessment": [_assert_required],
            "update_mask": [_assert_required],
        },
    )
    assessment_proto = request_message.assessment
    update_mask = request_message.update_mask
    # Translate each field-mask path into the keyword argument expected by the
    # tracking store's update_assessment API.
    kwargs = {}
    for path in update_mask.paths:
        if path == "assessment_name":
            kwargs["name"] = assessment_proto.assessment_name
        elif path == "expectation":
            kwargs["expectation"] = Expectation.from_proto(assessment_proto)
        elif path == "feedback":
            kwargs["feedback"] = Feedback.from_proto(assessment_proto)
        elif path == "rationale":
            kwargs["rationale"] = assessment_proto.rationale
        elif path == "metadata":
            # Convert the protobuf map field to a plain dict before handing off.
            kwargs["metadata"] = dict(assessment_proto.metadata)
        elif path == "valid":
            kwargs["valid"] = assessment_proto.valid
    updated_assessment = _get_tracking_store().update_assessment(
        trace_id=trace_id, assessment_id=assessment_id, **kwargs
    )
    response_message = UpdateAssessment.Response(assessment=updated_assessment.to_proto())
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_assessment(trace_id, assessment_id):
    """
    A request handler for `DELETE /mlflow/traces/{trace_id}/assessments/{assessment_id}`
    to delete an assessment.
    """
    store = _get_tracking_store()
    store.delete_assessment(trace_id, assessment_id)
    return _wrap_response(DeleteAssessment.Response())
# Deprecated MLflow Tracing APIs. Kept for backward compatibility but do not use.
@catch_mlflow_exception
@_disable_if_artifacts_only
def _deprecated_start_trace_v2():
"""
A request handler for `POST /mlflow/traces` to create a new TraceInfo record in tracking store.
"""
request_message = _get_request_message(
StartTrace(),
schema={
"experiment_id": [_assert_string],
"timestamp_ms": [_assert_intlike],
"request_metadata": [_assert_map_key_present],
"tags": [_assert_map_key_present],
},
)
request_metadata = {e.key: e.value for e in request_message.request_metadata}
tags = {e.key: e.value for e in request_message.tags}
trace_info = _get_tracking_store().deprecated_start_trace_v2(
experiment_id=request_message.experiment_id,
timestamp_ms=request_message.timestamp_ms,
request_metadata=request_metadata,
tags=tags,
)
response_message = StartTrace.Response(trace_info=trace_info.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _deprecated_end_trace_v2(request_id):
"""
A request handler for `PATCH /mlflow/traces/{request_id}` to mark an existing TraceInfo
record completed in tracking store.
"""
request_message = _get_request_message(
EndTrace(),
schema={
"timestamp_ms": [_assert_intlike],
"status": [_assert_string],
"request_metadata": [_assert_map_key_present],
"tags": [_assert_map_key_present],
},
)
request_metadata = {e.key: e.value for e in request_message.request_metadata}
tags = {e.key: e.value for e in request_message.tags}
trace_info = _get_tracking_store().deprecated_end_trace_v2(
request_id=request_id,
timestamp_ms=request_message.timestamp_ms,
status=TraceStatus.from_proto(request_message.status),
request_metadata=request_metadata,
tags=tags,
)
if isinstance(trace_info, TraceInfo):
trace_info = TraceInfoV2.from_v3(trace_info)
response_message = EndTrace.Response(trace_info=trace_info.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _deprecated_get_trace_info_v2(request_id):
"""
A request handler for `GET /mlflow/traces/{request_id}/info` to retrieve
an existing TraceInfo record from tracking store.
"""
trace_info = _get_tracking_store().get_trace_info(request_id)
trace_info = TraceInfoV2.from_v3(trace_info)
response_message = GetTraceInfo.Response(trace_info=trace_info.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _deprecated_search_traces_v2():
"""
A request handler for `GET /mlflow/traces` to search for TraceInfo records in tracking store.
"""
request_message = _get_request_message(
SearchTraces(),
schema={
"experiment_ids": [
_assert_array,
_assert_item_type_string,
_assert_required,
],
"filter": [_assert_string],
"max_results": [
_assert_intlike,
lambda x: _assert_less_than_or_equal(int(x), 500),
],
"order_by": [_assert_array, _assert_item_type_string],
"page_token": [_assert_string],
},
)
traces, token = _get_tracking_store().search_traces(
experiment_ids=request_message.experiment_ids,
filter_string=request_message.filter,
max_results=request_message.max_results,
order_by=request_message.order_by,
page_token=request_message.page_token or None,
)
traces = [TraceInfoV2.from_v3(t) for t in traces]
response_message = SearchTraces.Response()
response_message.traces.extend([e.to_proto() for e in traces])
if token:
response_message.next_page_token = token
return _wrap_response(response_message)
# Logged Models APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def get_logged_model_artifact_handler(model_id: str):
artifact_file_path = request.args.get("artifact_file_path")
if not artifact_file_path:
raise MlflowException(
'Request must include the "artifact_file_path" query parameter.',
error_code=BAD_REQUEST,
)
validate_path_is_safe(artifact_file_path)
logged_model: LoggedModel = _get_tracking_store().get_logged_model(model_id)
if _is_servable_proxied_run_artifact_root(logged_model.artifact_location):
artifact_repo = _get_artifact_repo_mlflow_artifacts()
artifact_path = _get_proxied_run_artifact_destination_path(
proxied_artifact_root=logged_model.artifact_location,
relative_path=artifact_file_path,
)
else:
artifact_repo = get_artifact_repository(logged_model.artifact_location)
artifact_path = artifact_file_path
return _send_artifact(artifact_repo, artifact_path)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_logged_model():
request_message = _get_request_message(
CreateLoggedModel(),
schema={
"experiment_id": [_assert_string, _assert_required],
"name": [_assert_string],
"model_type": [_assert_string],
"source_run_id": [_assert_string],
"params": [_assert_array],
"tags": [_assert_array],
},
)
model = _get_tracking_store().create_logged_model(
experiment_id=request_message.experiment_id,
name=request_message.name or None,
model_type=request_message.model_type,
source_run_id=request_message.source_run_id,
params=(
[LoggedModelParameter.from_proto(param) for param in request_message.params]
if request_message.params
else None
),
tags=(
[LoggedModelTag(key=tag.key, value=tag.value) for tag in request_message.tags]
if request_message.tags
else None
),
)
response_message = CreateLoggedModel.Response(model=model.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _log_logged_model_params(model_id: str):
request_message = _get_request_message(
LogLoggedModelParamsRequest(),
schema={
"model_id": [_assert_string, _assert_required],
"params": [_assert_array],
},
)
params = (
[LoggedModelParameter.from_proto(param) for param in request_message.params]
if request_message.params
else []
)
_get_tracking_store().log_logged_model_params(model_id, params)
return _wrap_response(LogLoggedModelParamsRequest.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_logged_model(model_id: str):
allow_deleted = request.args.get("allow_deleted", "false").lower() == "true"
model = _get_tracking_store().get_logged_model(model_id, allow_deleted=allow_deleted)
response_message = GetLoggedModel.Response(model=model.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _finalize_logged_model(model_id: str):
request_message = _get_request_message(
FinalizeLoggedModel(),
schema={
"model_id": [_assert_string, _assert_required],
"status": [_assert_intlike, _assert_required],
},
)
model = _get_tracking_store().finalize_logged_model(
request_message.model_id, LoggedModelStatus.from_int(request_message.status)
)
response_message = FinalizeLoggedModel.Response(model=model.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_logged_model(model_id: str):
_get_tracking_store().delete_logged_model(model_id)
return _wrap_response(DeleteLoggedModel.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_logged_model_tags(model_id: str):
request_message = _get_request_message(
SetLoggedModelTags(),
schema={"tags": [_assert_array]},
)
tags = [LoggedModelTag(key=tag.key, value=tag.value) for tag in request_message.tags]
_get_tracking_store().set_logged_model_tags(model_id, tags)
return _wrap_response(SetLoggedModelTags.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_logged_model_tag(model_id: str, tag_key: str):
_get_tracking_store().delete_logged_model_tag(model_id, tag_key)
return _wrap_response(DeleteLoggedModelTag.Response())
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_logged_models():
request_message = _get_request_message(
SearchLoggedModels(),
schema={
"experiment_ids": [
_assert_array,
_assert_item_type_string,
_assert_required,
],
"filter": [_assert_string],
"datasets": [_assert_array],
"max_results": [_assert_intlike],
"order_by": [_assert_array],
"page_token": [_assert_string],
},
)
models = _get_tracking_store().search_logged_models(
# Convert `RepeatedScalarContainer` objects (experiment_ids and order_by) to `list`
# to avoid serialization issues
experiment_ids=list(request_message.experiment_ids),
filter_string=request_message.filter or None,
datasets=(
[
{
"dataset_name": d.dataset_name,
"dataset_digest": d.dataset_digest or None,
}
for d in request_message.datasets
]
if request_message.datasets
else None
),
max_results=request_message.max_results or None,
order_by=(
[
{
"field_name": ob.field_name,
"ascending": ob.ascending,
"dataset_name": ob.dataset_name or None,
"dataset_digest": ob.dataset_digest or None,
}
for ob in request_message.order_by
]
if request_message.order_by
else None
),
page_token=request_message.page_token or None,
)
response_message = SearchLoggedModels.Response()
response_message.models.extend([e.to_proto() for e in models])
if models.token:
response_message.next_page_token = models.token
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _list_logged_model_artifacts(model_id: str):
request_message = _get_request_message(
ListLoggedModelArtifacts(),
schema={"artifact_directory_path": [_assert_string]},
)
if request_message.HasField("artifact_directory_path"):
artifact_path = validate_path_is_safe(request_message.artifact_directory_path)
else:
artifact_path = None
return _list_logged_model_artifacts_impl(model_id, artifact_path)
def _list_logged_model_artifacts_impl(
model_id: str, artifact_directory_path: str | None
) -> Response:
response = ListLoggedModelArtifacts.Response()
logged_model: LoggedModel = _get_tracking_store().get_logged_model(model_id)
if _is_servable_proxied_run_artifact_root(logged_model.artifact_location):
artifacts = _list_artifacts_for_proxied_run_artifact_root(
proxied_artifact_root=logged_model.artifact_location,
relative_path=artifact_directory_path,
)
else:
artifacts = get_artifact_repository(logged_model.artifact_location).list_artifacts(
artifact_directory_path
)
response.files.extend([a.to_proto() for a in artifacts])
response.root_uri = logged_model.artifact_location
return _wrap_response(response)
# =============================================================================
# Scorer Management Handlers
# =============================================================================
@catch_mlflow_exception
@_disable_if_artifacts_only
def _register_scorer():
request_message = _get_request_message(
RegisterScorer(),
schema={
"experiment_id": [_assert_required, _assert_string],
"name": [_assert_required, _assert_string],
"serialized_scorer": [_assert_required, _assert_string],
},
)
scorer_version = _get_tracking_store().register_scorer(
request_message.experiment_id,
request_message.name,
request_message.serialized_scorer,
)
response_message = RegisterScorer.Response()
response_message.version = scorer_version.scorer_version
response_message.scorer_id = scorer_version.scorer_id
response_message.experiment_id = scorer_version.experiment_id
response_message.name = scorer_version.scorer_name
response_message.serialized_scorer = scorer_version._serialized_scorer
response_message.creation_time = scorer_version.creation_time
response = Response(mimetype="application/json")
response.set_data(message_to_json(response_message))
return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _list_scorers():
request_message = _get_request_message(
ListScorers(),
schema={"experiment_id": [_assert_required, _assert_string]},
)
response_message = ListScorers.Response()
scorers = _get_tracking_store().list_scorers(request_message.experiment_id)
response_message.scorers.extend([scorer.to_proto() for scorer in scorers])
response = Response(mimetype="application/json")
response.set_data(message_to_json(response_message))
return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _list_scorer_versions():
request_message = _get_request_message(
ListScorerVersions(),
schema={
"experiment_id": [_assert_required, _assert_string],
"name": [_assert_required, _assert_string],
},
)
response_message = ListScorerVersions.Response()
scorers = _get_tracking_store().list_scorer_versions(
request_message.experiment_id, request_message.name
)
response_message.scorers.extend([scorer.to_proto() for scorer in scorers])
response = Response(mimetype="application/json")
response.set_data(message_to_json(response_message))
return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_scorer():
request_message = _get_request_message(
GetScorer(),
schema={
"experiment_id": [_assert_required, _assert_string],
"name": [_assert_required, _assert_string],
"version": [_assert_intlike],
},
)
response_message = GetScorer.Response()
scorer_version = _get_tracking_store().get_scorer(
request_message.experiment_id,
request_message.name,
request_message.version if request_message.HasField("version") else None,
)
response_message.scorer.CopyFrom(scorer_version.to_proto())
response = Response(mimetype="application/json")
response.set_data(message_to_json(response_message))
return response
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_scorer():
request_message = _get_request_message(
DeleteScorer(),
schema={
"experiment_id": [_assert_required, _assert_string],
"name": [_assert_required, _assert_string],
"version": [_assert_intlike],
},
)
_get_tracking_store().delete_scorer(
request_message.experiment_id,
request_message.name,
request_message.version if request_message.HasField("version") else None,
)
response_message = DeleteScorer.Response()
response = Response(mimetype="application/json")
response.set_data(message_to_json(response_message))
return response
def _get_rest_path(base_path, version=2):
return f"/api/{version}.0{base_path}"
def _get_ajax_path(base_path, version=2):
    """Build the AJAX API route (with any static prefix), e.g. `/ajax-api/2.0/...`."""
    route = "/ajax-api/{}.0{}".format(version, base_path)
    return _add_static_prefix(route)
def _add_static_prefix(route: str) -> str:
    """Prepend the configured static prefix (if any) to *route*."""
    prefix = os.environ.get(STATIC_PREFIX_ENV_VAR)
    if prefix:
        # Avoid a double slash when the prefix has a trailing one.
        return prefix.rstrip("/") + route
    return route
def _get_paths(base_path, version=2):
    """
    A service endpoints base path is typically something like /mlflow/experiment.
    We should register paths like /api/2.0/mlflow/experiment and
    /ajax-api/2.0/mlflow/experiment in the Flask router.
    """
    flask_path = _convert_path_parameter_to_flask_format(base_path)
    return [
        _get_rest_path(flask_path, version),
        _get_ajax_path(flask_path, version),
    ]
def _convert_path_parameter_to_flask_format(path):
"""
Converts path parameter format to Flask compatible format.
Some protobuf endpoint paths contain parameters like /mlflow/trace/{request_id}.
This can be interpreted correctly by gRPC framework like Armeria, but Flask does
not understand it. Instead, we need to specify it with a different format,
like /mlflow/trace/<request_id>.
"""
# Handle simple parameters like {trace_id}
path = re.sub(r"{(\w+)}", r"<\1>", path)
# Handle Databricks-specific syntax like {assessment.trace_id} -> <trace_id>
# This is needed because Databricks can extract trace_id from request body,
# but Flask needs it in the URL path
return re.sub(r"{assessment\.trace_id}", r"<trace_id>", path)
def get_handler(request_class):
    """
    Look up the Flask handler registered for a protobuf request type.

    Args:
        request_class: The type of protobuf message.

    Returns:
        The registered handler, or `_not_implemented` when none exists.
    """
    try:
        return HANDLERS[request_class]
    except KeyError:
        return _not_implemented
def get_service_endpoints(service, get_handler):
    """
    Enumerate the route tuples declared by a generated protobuf service.

    Args:
        service: A generated service class whose method descriptors carry
            Databricks `rpc` endpoint extensions.
        get_handler: Callable mapping a protobuf request class to a handler.

    Returns:
        List of (http_path, handler, [http_method]) tuples — one entry per
        REST/AJAX path variant of every declared endpoint.
    """
    ret = []
    for service_method in service.DESCRIPTOR.methods:
        endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints
        for endpoint in endpoints:
            # Each endpoint is registered under both /api/... and /ajax-api/... paths.
            for http_path in _get_paths(endpoint.path, version=endpoint.since.major):
                handler = get_handler(service().GetRequestClass(service_method))
                ret.append((http_path, handler, [endpoint.method]))
    return ret
def get_endpoints(get_handler=get_handler):
    """
    Collect every Flask route exposed by the MLflow services.

    Args:
        get_handler: Callable mapping a protobuf request class to a handler;
            injectable for testing.

    Returns:
        List of tuples (path, handler, methods)
    """
    return (
        get_service_endpoints(MlflowService, get_handler)
        + get_service_endpoints(ModelRegistryService, get_handler)
        + get_service_endpoints(MlflowArtifactsService, get_handler)
        + get_service_endpoints(WebhookService, get_handler)
        # GraphQL is served outside the protobuf service definitions.
        + [(_add_static_prefix("/graphql"), _graphql, ["GET", "POST"])]
    )
# Evaluation Dataset APIs
@catch_mlflow_exception
@_disable_if_artifacts_only
def _create_dataset_handler():
    """
    A request handler for creating an evaluation dataset.

    The request body must contain a required `name`; `experiment_ids` is an
    optional array and `tags` is an optional JSON-encoded string that is
    decoded before being passed to the tracking store.
    """
    request_message = _get_request_message(
        CreateDataset(),
        schema={
            "name": [_assert_required, _assert_string],
            "experiment_ids": [_assert_array],
            "tags": [_assert_string],
        },
    )
    # `tags` is a declared field on the message, so `hasattr` is always true;
    # only a non-empty value carries JSON to decode.
    tags = json.loads(request_message.tags) if request_message.tags else None
    dataset = _get_tracking_store().create_dataset(
        name=request_message.name,
        # Convert the protobuf repeated field to a plain list for the store.
        experiment_ids=list(request_message.experiment_ids)
        if request_message.experiment_ids
        else None,
        tags=tags,
    )
    response_message = CreateDataset.Response()
    response_message.dataset.CopyFrom(dataset.to_proto())
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_dataset_handler(dataset_id):
dataset = _get_tracking_store().get_dataset(dataset_id)
response_message = GetDataset.Response()
response_message.dataset.CopyFrom(dataset.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_dataset_handler(dataset_id):
_get_tracking_store().delete_dataset(dataset_id)
response_message = DeleteDataset.Response()
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _search_evaluation_datasets_handler():
request_message = _get_request_message(
SearchEvaluationDatasets(),
schema={
"experiment_ids": [_assert_array],
"filter_string": [_assert_string],
"max_results": [_assert_intlike],
"order_by": [_assert_array],
"page_token": [_assert_string],
},
)
datasets = _get_tracking_store().search_datasets(
experiment_ids=list(request_message.experiment_ids)
if request_message.experiment_ids
else None,
filter_string=request_message.filter_string or None,
max_results=request_message.max_results or None,
order_by=list(request_message.order_by) if request_message.order_by else None,
page_token=request_message.page_token or None,
)
response_message = SearchEvaluationDatasets.Response()
response_message.datasets.extend([d.to_proto() for d in datasets])
if datasets.token:
response_message.next_page_token = datasets.token
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _set_dataset_tags_handler(dataset_id):
request_message = _get_request_message(
SetDatasetTags(),
schema={
"tags": [_assert_required, _assert_string],
},
)
tags = json.loads(request_message.tags)
_get_tracking_store().set_dataset_tags(
dataset_id=dataset_id,
tags=tags,
)
response_message = SetDatasetTags.Response()
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _delete_dataset_tag_handler(dataset_id, key):
_get_tracking_store().delete_dataset_tag(
dataset_id=dataset_id,
key=key,
)
response_message = DeleteDatasetTag.Response()
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _upsert_dataset_records_handler(dataset_id):
request_message = _get_request_message(
UpsertDatasetRecords(),
schema={
"records": [_assert_required, _assert_string],
},
)
records = json.loads(request_message.records)
result = _get_tracking_store().upsert_dataset_records(
dataset_id=dataset_id,
records=records,
)
response_message = UpsertDatasetRecords.Response()
response_message.inserted_count = result["inserted"]
response_message.updated_count = result["updated"]
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _get_dataset_experiment_ids_handler(dataset_id):
    """
    Get experiment IDs associated with an evaluation dataset.

    Decorated with `@catch_mlflow_exception` / `@_disable_if_artifacts_only`
    for consistency with every other dataset handler in this module, so store
    errors are converted to proper error responses instead of raw 500s.
    """
    experiment_ids = _get_tracking_store().get_dataset_experiment_ids(dataset_id=dataset_id)
    response_message = GetDatasetExperimentIds.Response()
    response_message.experiment_ids.extend(experiment_ids)
    return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _add_dataset_to_experiments_handler(dataset_id):
request_message = _get_request_message(
AddDatasetToExperiments(),
schema={
"experiment_ids": [_assert_array],
},
)
dataset = _get_tracking_store().add_dataset_to_experiments(
dataset_id=dataset_id,
experiment_ids=request_message.experiment_ids,
)
response_message = AddDatasetToExperiments.Response()
response_message.dataset.CopyFrom(dataset.to_proto())
return _wrap_response(response_message)
@catch_mlflow_exception
@_disable_if_artifacts_only
def _remove_dataset_from_experiments_handler(dataset_id):
request_message = _get_request_message(
RemoveDatasetFromExperiments(),
schema={
"experiment_ids": [_assert_array],
},
)
dataset = _get_tracking_store().remove_dataset_from_experiments(
dataset_id=dataset_id,
experiment_ids=request_message.experiment_ids,
)
response_message = RemoveDatasetFromExperiments.Response()
response_message.dataset.CopyFrom(dataset.to_proto())
return _wrap_response(response_message)
def _get_dataset_records_handler(dataset_id):
    """
    A request handler to page through an evaluation dataset's records.

    Records are JSON-serialized into the response's `records` string field;
    `next_page_token` is set only when more pages remain.
    """
    request_message = _get_request_message(
        GetDatasetRecords(),
        schema={
            "max_results": [_assert_intlike],
            "page_token": [_assert_string],
        },
    )
    # Default page size when the client does not specify one.
    max_results = request_message.max_results or 1000
    page_token = request_message.page_token or None
    # Use the pagination-aware method
    # NOTE(review): this reaches into a private store method
    # (`_load_dataset_records`); consider promoting it to a public API.
    records, next_page_token = _get_tracking_store()._load_dataset_records(
        dataset_id, max_results=max_results, page_token=page_token
    )
    response_message = GetDatasetRecords.Response()
    records_dicts = [record.to_dict() for record in records]
    response_message.records = json.dumps(records_dicts)
    if next_page_token:
        response_message.next_page_token = next_page_token
    return _wrap_response(response_message)
HANDLERS = {
# Tracking Server APIs
CreateExperiment: _create_experiment,
GetExperiment: _get_experiment,
GetExperimentByName: _get_experiment_by_name,
DeleteExperiment: _delete_experiment,
RestoreExperiment: _restore_experiment,
UpdateExperiment: _update_experiment,
CreateRun: _create_run,
UpdateRun: _update_run,
DeleteRun: _delete_run,
RestoreRun: _restore_run,
LogParam: _log_param,
LogMetric: _log_metric,
SetExperimentTag: _set_experiment_tag,
DeleteExperimentTag: _delete_experiment_tag,
SetTag: _set_tag,
DeleteTag: _delete_tag,
LogBatch: _log_batch,
LogModel: _log_model,
GetRun: _get_run,
SearchRuns: _search_runs,
ListArtifacts: _list_artifacts,
GetMetricHistory: _get_metric_history,
GetMetricHistoryBulkInterval: get_metric_history_bulk_interval_handler,
SearchExperiments: _search_experiments,
LogInputs: _log_inputs,
LogOutputs: _log_outputs,
# Evaluation Dataset APIs
CreateDataset: _create_dataset_handler,
GetDataset: _get_dataset_handler,
DeleteDataset: _delete_dataset_handler,
SearchEvaluationDatasets: _search_evaluation_datasets_handler,
SetDatasetTags: _set_dataset_tags_handler,
DeleteDatasetTag: _delete_dataset_tag_handler,
UpsertDatasetRecords: _upsert_dataset_records_handler,
GetDatasetExperimentIds: _get_dataset_experiment_ids_handler,
GetDatasetRecords: _get_dataset_records_handler,
AddDatasetToExperiments: _add_dataset_to_experiments_handler,
RemoveDatasetFromExperiments: _remove_dataset_from_experiments_handler,
# Model Registry APIs
CreateRegisteredModel: _create_registered_model,
GetRegisteredModel: _get_registered_model,
DeleteRegisteredModel: _delete_registered_model,
UpdateRegisteredModel: _update_registered_model,
RenameRegisteredModel: _rename_registered_model,
SearchRegisteredModels: _search_registered_models,
GetLatestVersions: _get_latest_versions,
CreateModelVersion: _create_model_version,
GetModelVersion: _get_model_version,
DeleteModelVersion: _delete_model_version,
UpdateModelVersion: _update_model_version,
TransitionModelVersionStage: _transition_stage,
GetModelVersionDownloadUri: _get_model_version_download_uri,
SearchModelVersions: _search_model_versions,
SetRegisteredModelTag: _set_registered_model_tag,
DeleteRegisteredModelTag: _delete_registered_model_tag,
SetModelVersionTag: _set_model_version_tag,
DeleteModelVersionTag: _delete_model_version_tag,
SetRegisteredModelAlias: _set_registered_model_alias,
DeleteRegisteredModelAlias: _delete_registered_model_alias,
GetModelVersionByAlias: _get_model_version_by_alias,
# Webhook APIs
CreateWebhook: _create_webhook,
ListWebhooks: _list_webhooks,
GetWebhook: _get_webhook,
UpdateWebhook: _update_webhook,
DeleteWebhook: _delete_webhook,
TestWebhook: _test_webhook,
# MLflow Artifacts APIs
DownloadArtifact: _download_artifact,
UploadArtifact: _upload_artifact,
ListArtifactsMlflowArtifacts: _list_artifacts_mlflow_artifacts,
DeleteArtifact: _delete_artifact_mlflow_artifacts,
CreateMultipartUpload: _create_multipart_upload_artifact,
CompleteMultipartUpload: _complete_multipart_upload_artifact,
AbortMultipartUpload: _abort_multipart_upload_artifact,
# MLflow Tracing APIs (V3)
StartTraceV3: _start_trace_v3,
GetTraceInfoV3: _get_trace_info_v3,
SearchTracesV3: _search_traces_v3,
DeleteTracesV3: _delete_traces,
CalculateTraceFilterCorrelation: _calculate_trace_filter_correlation,
SetTraceTagV3: _set_trace_tag_v3,
DeleteTraceTagV3: _delete_trace_tag_v3,
LinkTracesToRun: _link_traces_to_run,
BatchGetTraces: _batch_get_traces,
GetTrace: _get_trace,
# Assessment APIs
CreateAssessment: _create_assessment,
GetAssessmentRequest: _get_assessment,
UpdateAssessment: _update_assessment,
DeleteAssessment: _delete_assessment,
# Legacy MLflow Tracing V2 APIs. Kept for backward compatibility but do not use.
StartTrace: _deprecated_start_trace_v2,
EndTrace: _deprecated_end_trace_v2,
GetTraceInfo: _deprecated_get_trace_info_v2,
SearchTraces: _deprecated_search_traces_v2,
DeleteTraces: _delete_traces,
SetTraceTag: _set_trace_tag,
DeleteTraceTag: _delete_trace_tag,
# Logged Models APIs
CreateLoggedModel: _create_logged_model,
GetLoggedModel: _get_logged_model,
FinalizeLoggedModel: _finalize_logged_model,
DeleteLoggedModel: _delete_logged_model,
SetLoggedModelTags: _set_logged_model_tags,
DeleteLoggedModelTag: _delete_logged_model_tag,
SearchLoggedModels: _search_logged_models,
ListLoggedModelArtifacts: _list_logged_model_artifacts,
LogLoggedModelParamsRequest: _log_logged_model_params,
# Scorer APIs
RegisterScorer: _register_scorer,
ListScorers: _list_scorers,
ListScorerVersions: _list_scorer_versions,
GetScorer: _get_scorer,
DeleteScorer: _delete_scorer,
}
|
ModelRegistryStoreRegistryWrapper
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_shape_base_.py
|
{
"start": 27321,
"end": 28189
}
|
class ____(TestCase):
    # Sanity checks for np.may_share_memory: views of one base array may share
    # memory with it, while separately allocated arrays must not.
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))
        # Every view of `d` (identity, reversed, strided, sliced) overlaps `d`.
        assert_(np.may_share_memory(d, d))
        assert_(np.may_share_memory(d, d[::-1]))
        assert_(np.may_share_memory(d, d[::2]))
        assert_(np.may_share_memory(d, d[1:, ::-1]))
        # `d2` is a distinct allocation, so no view of `d` can overlap it.
        assert_(not np.may_share_memory(d[::-1], d2))
        assert_(not np.may_share_memory(d[::2], d2))
        assert_(not np.may_share_memory(d[1:, ::-1], d2))
        assert_(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
def compare_results(res, desired):
    """Assert that two equal-length sequences contain pairwise-equal arrays."""
    if len(res) != len(desired):
        raise ValueError("Iterables have different lengths")
    # zip(..., strict=True) (PEP 618, Python 3.10+) could subsume the length
    # check above once the minimum supported version allows it.
    for actual, expected in zip(res, desired):
        assert_array_equal(actual, expected)
if __name__ == "__main__":
run_tests()
|
TestMayShareMemory
|
python
|
doocs__leetcode
|
lcof/面试题07. 重建二叉树/Solution.py
|
{
"start": 164,
"end": 642
}
|
class ____:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Rebuild a binary tree from its preorder and inorder traversals.

        All node values are assumed distinct, so each value's inorder index
        uniquely splits the tree into left and right subtrees.
        """
        # Map each value to its index in the inorder sequence for O(1) splits.
        pos = {val: idx for idx, val in enumerate(inorder)}

        def build(pre_start, in_start, size):
            # `size` nodes starting at preorder[pre_start] / inorder[in_start].
            if size < 1:
                return None
            root_val = preorder[pre_start]
            node = TreeNode(root_val)
            root_in = pos[root_val]
            left_size = root_in - in_start
            node.left = build(pre_start + 1, in_start, left_size)
            node.right = build(pre_start + 1 + left_size, root_in + 1, size - left_size - 1)
            return node

        return build(0, 0, len(preorder))
|
Solution
|
python
|
joke2k__faker
|
faker/providers/lorem/fil_PH/__init__.py
|
{
"start": 68,
"end": 11351
}
|
class ____(LoremProvider):
"""Implement lorem provider for ``fil_PH`` locale.
Word list is based on the source(s) below with some filtering,
de-conjugating, and additional common words.
Sources:
- https://1000mostcommonwords.com/1000-most-common-filipino-words/
"""
word_list = (
"abo",
"abot",
"aga",
"agham",
"akin",
"aklat",
"ako",
"akyat",
"alam",
"alang-alang",
"alikabok",
"alin",
"alok",
"alon",
"ama",
"amin",
"amoy",
"anak",
"ang",
"angkop",
"anim",
"ano",
"antas",
"apat",
"aral",
"araw",
"ari-arian",
"asa",
"asawa",
"asin",
"aso",
"asukal",
"asul",
"at",
"atin",
"away",
"ay",
"ayos",
"baba",
"babae",
"babaw",
"bagal",
"bagaman",
"bagay",
"bago",
"bagyo",
"bahagi",
"bahay",
"baka",
"bakal",
"bakasyon",
"bakit",
"bakuran",
"balat",
"balde",
"balikat",
"banat",
"banda",
"bangka",
"bangko",
"bansa",
"barko",
"basa",
"basehan",
"baso",
"bata",
"batas",
"batay",
"bato",
"bawas",
"bawat",
"bayad",
"bayan",
"baybayin",
"benta",
"bente",
"berde",
"bibig",
"bigas",
"bigat",
"bigay",
"bigkas",
"bihira",
"bilang",
"bili",
"bilis",
"binata",
"binibini",
"binti",
"biyahe",
"biyaya",
"boses",
"braso",
"buhangin",
"buhay",
"buhok",
"bukas",
"bulaklak",
"bundok",
"buntot",
"buo",
"burol",
"butas",
"buti",
"buto",
"buwan",
"daan",
"dagat",
"dagdag",
"dahil",
"dahilan",
"dala",
"dalaga",
"dalas",
"dalawampu",
"daliri",
"daloy",
"damit",
"damo",
"dapat",
"dati",
"dating",
"digmaan",
"dilaw",
"disenyo",
"dito",
"doon",
"dugo",
"dyip",
"edad",
"eksakto",
"eksperimento",
"elemento",
"enerhiya",
"epekto",
"eroplano",
"espesyal",
"estado",
"gabi",
"gala",
"galaw",
"galit",
"gamit",
"gamot",
"gana",
"ganap",
"ganda",
"gas",
"gastos",
"gatas",
"gawa",
"gawin",
"gilid",
"giliw",
"ginang",
"ginoo",
"ginto",
"gising",
"gitna",
"gubat",
"guhit",
"gulo",
"gulong",
"gusto",
"haba",
"habang",
"hakbang",
"halaga",
"halalan",
"halaman",
"haligi",
"halimbawa",
"hambing",
"hanap",
"hanapbuhay",
"hanay",
"handa",
"hanggan",
"hanggang",
"hangin",
"hardin",
"hati",
"hatid",
"hatol",
"hayop",
"higit",
"hila",
"hilaga",
"hilera",
"himpapawid",
"hindi",
"hintay",
"hirap",
"hiwa",
"hiwalay",
"hugis",
"hula",
"huli",
"hulog",
"humantong",
"husay",
"iba",
"ibabaw",
"ibig",
"ibon",
"ilalim",
"ilan",
"ilang",
"ilog",
"ilong",
"industriya",
"ingay",
"inggit",
"init",
"inom",
"insekto",
"instrumento",
"inumin",
"ipon",
"isa",
"isda",
"isip",
"iskor",
"isla",
"itim",
"itlog",
"ito",
"iwan",
"iyon",
"kaaway",
"kababaihan",
"kabayo",
"kabuuan",
"kaganapan",
"kahit",
"kahon",
"kaibigan",
"kailangan",
"kailanman",
"kain",
"kaisa-isa",
"kakaiba",
"kalabit",
"kalagayan",
"kalahati",
"kalakal",
"kalakalan",
"kalsada",
"kalye",
"kama",
"kamay",
"kampanilya",
"kampo",
"kanin",
"kanluran",
"kanta",
"kanya",
"kapag",
"kapal",
"kapangyarihan",
"kapantay",
"kapatid",
"kapit-bahay",
"kapital",
"kapitan",
"kapwa",
"karagatan",
"karamihan",
"karanasan",
"karaniwan",
"karapatan",
"karne",
"kasalukuyan",
"kasama",
"kasanayan",
"kasangkapan",
"kasaysayan",
"kaso",
"katangian",
"katarungan",
"katawan",
"katinig",
"katulad",
"katunayan",
"kawal",
"kaya",
"kaysa",
"kayumanggi",
"kilos",
"kinang",
"kinig",
"klase",
"ko",
"kompanya",
"koponan",
"kopya",
"kotse",
"kuha",
"kulay",
"kumpleto",
"kung",
"kuskos",
"kuwento",
"laban",
"lagay",
"lagda",
"lago",
"lahat",
"lahi",
"lakad",
"lakas",
"laki",
"lalim",
"lalo",
"laman",
"lamang",
"lambak",
"lambot",
"lamig",
"landas",
"langis",
"langit",
"langoy",
"lapit",
"larawan",
"laro",
"lason",
"lawa",
"lawak",
"layag",
"layo",
"leeg",
"libo",
"libre",
"ligaw",
"ligtas",
"liit",
"likas",
"likha",
"likido",
"likod",
"lima",
"linaw",
"linggo",
"linis",
"linya",
"lipad",
"listahan",
"litaw",
"liwanag",
"lubid",
"lugar",
"luma",
"lungsod",
"lupa",
"lupon",
"lutas",
"luwag",
"maaari",
"maaga",
"madali",
"maging",
"maginoo",
"magkano",
"magulang",
"mahal",
"mahalaga",
"mahirap",
"maikli",
"mainam",
"mainit",
"mais",
"makina",
"mali",
"maliban",
"manatili",
"manggagawa",
"mangyari",
"mangyaring",
"manipis",
"maniwala",
"mansanas",
"mapa",
"marahil",
"marami",
"mas",
"masa",
"masyado",
"mata",
"may",
"mayroon",
"medyo",
"merkado",
"mga",
"milyon",
"minahan",
"minuto",
"mukha",
"mula",
"muli",
"mundo",
"musika",
"na",
"naging",
"nais",
"nakita",
"namin",
"nanay",
"nawala",
"nayon",
"ng",
"ngayon",
"ngipin",
"ngiti",
"ngunit",
"noon",
"numero",
"oo",
"opisina",
"opo",
"oras",
"orihinal",
"pa",
"paa",
"paaralan",
"pabor",
"pabuya",
"pader",
"pagitan",
"pakiramdam",
"paksa",
"palagi",
"palapag",
"pamamagitan",
"pamilya",
"panahon",
"panalo",
"pandiwa",
"pangalan",
"panganib",
"pangarap",
"pangkat",
"pangmaramihang",
"pangngalan",
"pangunahin",
"pantig",
"panuntunan",
"papel",
"para",
"paraan",
"pareho",
"pares",
"parirala",
"parisukat",
"partido",
"pasa",
"pasiya",
"pasok",
"patakaran",
"patlang",
"patnubay",
"pato",
"payag",
"pera",
"pigil",
"pilak",
"pili",
"pindot",
"pinto",
"piraso",
"pito",
"plano",
"port",
"posible",
"posisyon",
"problema",
"produkto",
"proseso",
"prutas",
"pula",
"puno",
"punta",
"punto",
"pusa",
"puso",
"puti",
"puwang",
"puwersa",
"radyo",
"rehiyon",
"resulta",
"sa",
"saan",
"sabay",
"sabi",
"sagot",
"sakahan",
"salamat",
"salamin",
"sali",
"salita",
"sama",
"sampu",
"sandali",
"sang-ayon",
"sangay",
"sanggol",
"sapat",
"sapatos",
"sarili",
"sariwa",
"saya",
"sayaw",
"sigaw",
"siglo",
"sigurado",
"sikat",
"sila",
"silangan",
"silya",
"simbolo",
"simula",
"singil",
"singsing",
"sining",
"sira",
"sistema",
"siya",
"siyam",
"siyempre",
"solusyon",
"subok",
"sukat",
"sulat",
"sulok",
"sulong",
"sumbrero",
"sundin",
"sundo",
"sunod",
"sunog",
"suot",
"suporta",
"suri",
"taas",
"taba",
"tagal",
"tagumpay",
"tahanan",
"tahimik",
"tainga",
"takbo",
"takot",
"tala",
"talakay",
"talim",
"talo",
"talon",
"tama",
"tandaan",
"tanggap",
"tanghali",
"tangi",
"tangkad",
"tanong",
"tao",
"taon",
"tapang",
"tapat",
"tapon",
"tapos",
"tatlon",
"tatsulok",
"tawag",
"tawid",
"tayo",
"temperatura",
"timbang",
"timog",
"tinapay",
"tinda",
"tindahan",
"tingin",
"tipon",
"tiyak",
"tono",
"totoo",
"trabaho",
"trak",
"tren",
"tubig",
"tugon",
"tukoy",
"tuktok",
"tula",
"tulad",
"tulog",
"tulong",
"tuloy",
"tumba",
"tunay",
"tungkol",
"tungo",
"tunog",
"turo",
"tuwa",
"tuwid",
"ugat",
"ulan",
"ulo",
"una",
"upo",
"upuan",
"uri",
"wala",
"walo",
"wika",
"yaman",
"yelo",
)
parts_of_speech: Dict[str, tuple] = {}
|
Provider
|
python
|
ansible__ansible
|
lib/ansible/vars/hostvars.py
|
{
"start": 1147,
"end": 3090
}
|
class ____(c.Mapping):
"""A read-only wrapper to enable on-demand templating of a specific host's variables under that host's variable context."""
def __init__(self, inventory: InventoryManager, variable_manager: VariableManager, loader: DataLoader) -> None:
self._inventory = inventory
self._loader = loader
self._variable_manager = variable_manager
variable_manager._hostvars = self
def raw_get(self, host_name: str) -> dict[str, t.Any] | Marker:
"""
Similar to __getitem__, however the returned data is not run through
the templating engine to expand variables in the hostvars.
"""
# does not use inventory.hosts, so it can create localhost on demand
host = self._inventory.get_host(host_name)
if host is None:
from ansible._internal._templating import _jinja_bits
return _jinja_bits._undef(f"hostvars[{host_name!r}]")
return self._variable_manager.get_vars(host=host, include_hostvars=False)
def __getitem__(self, key: str) -> HostVarsVars | Marker:
data = self.raw_get(key)
from ansible._internal._templating import _jinja_bits
if isinstance(data, _jinja_bits.Marker):
return data
return HostVarsVars(data, loader=self._loader, host=key)
def __contains__(self, item: object) -> bool:
# does not use inventory.hosts, so it can create localhost on demand
return self._inventory.get_host(item) is not None
def __iter__(self) -> t.Iterator[str]:
yield from self._inventory.hosts
def __len__(self) -> int:
return len(self._inventory.hosts)
def __deepcopy__(self, memo: t.Any) -> HostVars:
# this object may be stored in a var dict that is itself deep copied, but since the underlying data
# is supposed to be immutable, we don't need to actually copy the data
return self
|
HostVars
|
python
|
walkccc__LeetCode
|
solutions/3414. Maximum Score of Non-overlapping Intervals/3414.py
|
{
"start": 60,
"end": 177
}
|
class ____:
weight: int
selected: tuple[int]
def __iter__(self):
yield self.weight
yield self.selected
|
T
|
python
|
encode__django-rest-framework
|
tests/test_serializer_bulk_update.py
|
{
"start": 140,
"end": 3802
}
|
class ____(TestCase):
"""
Creating multiple instances using serializers.
"""
def setUp(self):
class BookSerializer(serializers.Serializer):
id = serializers.IntegerField()
title = serializers.CharField(max_length=100)
author = serializers.CharField(max_length=100)
self.BookSerializer = BookSerializer
def test_bulk_create_success(self):
"""
Correct bulk update serialization should return the input data.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 1,
'title': 'If this is a man',
'author': 'Primo Levi'
}, {
'id': 2,
'title': 'The wind-up bird chronicle',
'author': 'Haruki Murakami'
}
]
serializer = self.BookSerializer(data=data, many=True)
assert serializer.is_valid() is True
assert serializer.validated_data == data
assert serializer.errors == []
def test_bulk_create_errors(self):
"""
Incorrect bulk create serialization should return errors.
"""
data = [
{
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}, {
'id': 1,
'title': 'If this is a man',
'author': 'Primo Levi'
}, {
'id': 'foo',
'title': 'The wind-up bird chronicle',
'author': 'Haruki Murakami'
}
]
expected_errors = [
{},
{},
{'id': ['A valid integer is required.']}
]
serializer = self.BookSerializer(data=data, many=True)
assert serializer.is_valid() is False
assert serializer.errors == expected_errors
assert serializer.validated_data == []
def test_invalid_list_datatype(self):
"""
Data containing list of incorrect data type should return errors.
"""
data = ['foo', 'bar', 'baz']
serializer = self.BookSerializer(data=data, many=True)
assert serializer.is_valid() is False
message = 'Invalid data. Expected a dictionary, but got str.'
expected_errors = [
{'non_field_errors': [message]},
{'non_field_errors': [message]},
{'non_field_errors': [message]}
]
assert serializer.errors == expected_errors
def test_invalid_single_datatype(self):
"""
Data containing a single incorrect data type should return errors.
"""
data = 123
serializer = self.BookSerializer(data=data, many=True)
assert serializer.is_valid() is False
expected_errors = {'non_field_errors': ['Expected a list of items but got type "int".']}
assert serializer.errors == expected_errors
def test_invalid_single_object(self):
"""
Data containing only a single object, instead of a list of objects
should return errors.
"""
data = {
'id': 0,
'title': 'The electric kool-aid acid test',
'author': 'Tom Wolfe'
}
serializer = self.BookSerializer(data=data, many=True)
assert serializer.is_valid() is False
expected_errors = {'non_field_errors': ['Expected a list of items but got type "dict".']}
assert serializer.errors == expected_errors
|
BulkCreateSerializerTests
|
python
|
google__pytype
|
pytype/abstract/abstract_test.py
|
{
"start": 42317,
"end": 48689
}
|
class ____(AbstractTestBase):
def test_interpreter_class_official_name(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self._ctx)
cls.update_official_name("Z")
self.assertEqual(cls.official_name, "Z")
cls.update_official_name("A") # takes effect because A < Z
self.assertEqual(cls.official_name, "A")
cls.update_official_name("Z") # no effect
self.assertEqual(cls.official_name, "A")
cls.update_official_name("X") # takes effect because X == cls.name
self.assertEqual(cls.official_name, "X")
cls.update_official_name("A") # no effect
self.assertEqual(cls.official_name, "X")
def test_type_parameter_official_name(self):
param = abstract.TypeParameter("T", self._ctx)
param.update_official_name("T")
self.assertFalse(self._ctx.errorlog.has_error())
param.update_official_name("Q")
self.assertTrue(self._ctx.errorlog.has_error())
def test_type_parameter_equality(self):
param1 = abstract.TypeParameter("S", self._ctx)
param2 = abstract.TypeParameter("T", self._ctx)
cls = abstract.InterpreterClass("S", [], {}, None, None, (), self._ctx)
self.assertEqual(param1, param1)
self.assertNotEqual(param1, param2)
self.assertNotEqual(param1, cls)
def test_union_equality(self):
union1 = abstract.Union((self._ctx.convert.unsolvable,), self._ctx)
union2 = abstract.Union((self._ctx.convert.none,), self._ctx)
cls = abstract.InterpreterClass("Union", [], {}, None, None, (), self._ctx)
self.assertEqual(union1, union1)
self.assertNotEqual(union1, union2)
self.assertNotEqual(union1, cls)
def test_instantiate_type_parameter_type(self):
params = {
abstract_utils.T: abstract.TypeParameter(abstract_utils.T, self._ctx)
}
cls = abstract.ParameterizedClass(
self._ctx.convert.type_type, params, self._ctx
)
self.assertListEqual(
cls.instantiate(self._node).data, [self._ctx.convert.unsolvable]
)
def test_super_type(self):
supercls = special_builtins.Super.make(self._ctx)
self.assertEqual(supercls.cls, self._ctx.convert.type_type)
def test_instantiate_interpreter_class(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self._ctx)
# When there is no current frame, create a new instance every time.
v1 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
v2 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
self.assertIsNot(v1, v2)
# Create one instance per opcode.
(frame,) = self._ctx.vm.simple_stack(opcode=object())
self._ctx.vm.push_frame(frame)
v3 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
v4 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
self.assertIsNot(v1, v3)
self.assertIsNot(v2, v3)
self.assertIs(v3, v4)
def test_set_module_on_module(self):
# A module's 'module' attribute should always remain None, and no one
# should attempt to set it to something besides the module's name or None.
ast = pytd_utils.CreateModule("some_mod")
mod = abstract.Module(self._ctx, ast.name, {}, ast)
mod.module = ast.name
self.assertIsNone(mod.module)
self.assertEqual(ast.name, mod.full_name)
mod.module = None
self.assertIsNone(mod.module)
self.assertEqual(ast.name, mod.full_name)
def set_module():
mod.module = "other_mod"
self.assertRaises(AssertionError, set_module)
def test_call_type_parameter_instance(self):
instance = abstract.Instance(self._ctx.convert.list_type, self._ctx)
instance.merge_instance_type_parameter(
self._ctx.root_node,
abstract_utils.T,
self._ctx.convert.int_type.to_variable(self._ctx.root_node),
)
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(t, instance, self._ctx)
node, ret = t_instance.call(
self._node, t_instance.to_binding(self._node), function.Args(posargs=())
)
self.assertIs(node, self._node)
(retval,) = ret.data
self.assertEqual(retval.cls, self._ctx.convert.int_type)
def test_call_empty_type_parameter_instance(self):
instance = abstract.Instance(self._ctx.convert.list_type, self._ctx)
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(t, instance, self._ctx)
node, ret = t_instance.call(
self._node, t_instance.to_binding(self._node), function.Args(posargs=())
)
self.assertIs(node, self._node)
(retval,) = ret.data
self.assertIs(retval, self._ctx.convert.empty)
def test_call_type_parameter_instance_with_wrong_args(self):
instance = abstract.Instance(self._ctx.convert.list_type, self._ctx)
instance.merge_instance_type_parameter(
self._ctx.root_node,
abstract_utils.T,
self._ctx.convert.int_type.to_variable(self._ctx.root_node),
)
t = abstract.TypeParameter(abstract_utils.T, self._ctx)
t_instance = abstract.TypeParameterInstance(t, instance, self._ctx)
posargs = (self._ctx.new_unsolvable(self._node),) * 3
node, ret = t_instance.call(
self._node,
t_instance.to_binding(self._node),
function.Args(posargs=posargs),
)
self.assertIs(node, self._node)
self.assertTrue(ret.bindings)
(error,) = self._ctx.errorlog
self.assertEqual(error.name, "wrong-arg-count")
def test_instantiate_tuple_class_for_sub(self):
type_param = abstract.TypeParameter(abstract_utils.K, self._ctx)
cls = abstract.TupleClass(
self._ctx.convert.tuple_type,
{0: type_param, abstract_utils.T: type_param},
self._ctx,
)
# Instantiate the tuple class.
subst_value = cls.instantiate(
self._ctx.root_node, abstract_utils.DUMMY_CONTAINER
)
# Recover the class from the instance.
subbed_cls = self._ctx.annotation_utils.sub_one_annotation(
self._ctx.root_node, type_param, [{abstract_utils.K: subst_value}]
)
self.assertEqual(cls, subbed_cls)
def test_singleton(self):
self.assertIs(
abstract.Unsolvable(self._ctx), abstract.Unsolvable(self._ctx)
)
def test_singleton_subclass(self):
self.assertIs(abstract.Empty(self._ctx), abstract.Empty(self._ctx))
self.assertIsNot(abstract.Deleted(1, self._ctx), abstract.Empty(self._ctx))
|
AbstractTest
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/embed/test_util__embed.py
|
{
"start": 25181,
"end": 28449
}
|
class ____:
def test_is_tex_string(self) -> None:
assert beu.is_tex_string("$$test$$") is True
assert beu.is_tex_string("$$test$$ ") is False
assert beu.is_tex_string(" $$test$$") is False
assert beu.is_tex_string(" $$test$$ ") is False
assert beu.is_tex_string("\\[test\\]") is True
assert beu.is_tex_string("\\(test\\)") is True
assert beu.is_tex_string("HTML <b>text</b> $$\\sin(x) and \\[x\\cdot\\pi\\]!") is False
assert beu.is_tex_string("\\[test\\]") is True
assert beu.is_tex_string("\\(test\\)") is True
assert beu.is_tex_string("test$$") is False
assert beu.is_tex_string("$$test") is False
assert beu.is_tex_string("HTML <b>text</b> $$sin(x)$$ and [xcdotpi]!") is False
assert beu.is_tex_string("$$test\\]") is False
assert beu.is_tex_string("$$test $$ end $$") is True
assert beu.is_tex_string("$$ \\[test end\\]") is False
assert beu.is_tex_string("text \\[text $$latex$$") is False
assert beu.is_tex_string("$$ tex [ tex ] tex $$") is True
assert beu.is_tex_string("$$tex$$text$$tex$$") is True
assert beu.is_tex_string("part0$$part1\\[part2\\(part3$$") is False
assert beu.is_tex_string("part0$$part1\\[part2\\(part3\\]") is False
assert beu.is_tex_string("part0$$part1\\[part2\\(part3\\)") is False
assert beu.is_tex_string("""$$
cos(x)
$$""") is True
assert beu.is_tex_string("""$$
cos(x)$$
""") is False
def test_contains_tex_string(self) -> None:
assert beu.contains_tex_string("$$test$$") is True
assert beu.contains_tex_string("\\[test\\]") is True
assert beu.contains_tex_string("\\(test\\)") is True
assert beu.contains_tex_string("HTML <b>text</b> $$\\sin(x) and \\[x\\cdot\\pi\\]!") is True
assert beu.contains_tex_string("\\[test\\]") is True
assert beu.contains_tex_string("\\(test\\)") is True
assert beu.contains_tex_string("test$$") is False
assert beu.contains_tex_string("$$test") is False
assert beu.contains_tex_string("HTML <b>text</b> $$sin(x)$$ and [xcdotpi]!") is True
assert beu.contains_tex_string("$$test\\]") is False
assert beu.contains_tex_string("$$test $$ end $$") is True
assert beu.contains_tex_string("$$ \\[test end\\]") is True
assert beu.contains_tex_string("text \\[text $$latex$$") is True
assert beu.contains_tex_string("$$ tex [ tex ] tex $$") is True
assert beu.contains_tex_string("$$tex$$text$$tex$$") is True
assert beu.contains_tex_string("part0$$part1\\[part2\\(part3$$") is True
assert beu.contains_tex_string("part0$$part1\\[part2\\(part3\\]") is True
assert beu.contains_tex_string("part0$$part1\\[part2\\(part3\\)") is True
assert beu.contains_tex_string("""$$
cos(x)
$$""") is True
assert beu.contains_tex_string("""$$
cos(x)$$
""") is True
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# needed for caplog tests to function
basicConfig()
|
Test__tex_helpers
|
python
|
astropy__astropy
|
astropy/utils/iers/iers.py
|
{
"start": 25039,
"end": 27974
}
|
class ____(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
See `~astropy.utils.iers.IERS_B.read` for instructions on how to read
a pre-2023 style IERS B file (usually named ``eopc04_IAU2000.62-now``).
"""
iers_table = None
@classmethod
def read(
cls,
file: str | os.PathLike[str] | None = None,
readme: str | os.PathLike[str] | None = None,
data_start: int = 6,
) -> Self:
"""Read IERS-B table from a eopc04.* file provided by IERS.
Parameters
----------
file : str or os.PathLike[str]
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str or os.PathLike[str]
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
Starting row. Default is 6, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
Notes
-----
To read a pre-2023 style IERS B file (usually named something like
``eopc04_IAU2000.62-now``), do something like this example with an
excerpt that is used for testing::
>>> from astropy.utils.iers import IERS_B
>>> from astropy.utils.data import get_pkg_data_filename
>>> old_style_file = get_pkg_data_filename(
... "tests/data/iers_b_old_style_excerpt",
... package="astropy.utils.iers")
>>> iers_b = IERS_B.read(
... old_style_file,
... readme=get_pkg_data_filename("data/ReadMe.eopc04_IAU2000",
... package="astropy.utils.iers"),
... data_start=14)
"""
if file is None:
file = IERS_B_FILE
else:
file = os.fspath(file)
if readme is None:
readme = IERS_B_README
else:
readme = os.fspath(readme)
table = super().read(file, format="cds", readme=readme, data_start=data_start)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
|
IERS_B
|
python
|
huggingface__transformers
|
src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py
|
{
"start": 15215,
"end": 15519
}
|
class ____(GroundingDinoPreTrainedModel):
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, MMGroundingDinoContrastiveEmbedding):
init.constant_(module.bias, -math.log((1 - 0.01) / 0.01))
|
MMGroundingDinoPreTrainedModel
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 19264,
"end": 20018
}
|
class ____(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp( # type: ignore
self, bound: int, dir: t.Literal[1, -1], open: bool
) -> int:
if not open:
return bound
return bound + dir
|
IntRange
|
python
|
HIPS__autograd
|
autograd/core.py
|
{
"start": 859,
"end": 3663
}
|
class ____(Node):
__slots__ = ["parents", "vjp"]
def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
self.parents = parents
try:
vjpmaker = primitive_vjps[fun]
except KeyError:
fun_name = getattr(fun, "__name__", fun)
raise NotImplementedError(f"VJP of {fun_name} wrt argnums {parent_argnums} not defined")
self.vjp = vjpmaker(parent_argnums, value, args, kwargs)
def initialize_root(self):
self.parents = []
self.vjp = lambda g: ()
primitive_vjps = {}
def defvjp_argnums(fun, vjpmaker):
primitive_vjps[fun] = vjpmaker
def defvjp_argnum(fun, vjpmaker):
def vjp_argnums(argnums, *args):
vjps = [vjpmaker(argnum, *args) for argnum in argnums]
return lambda g: (vjp(g) for vjp in vjps)
defvjp_argnums(fun, vjp_argnums)
def defvjp(fun, *vjpmakers, **kwargs):
argnums = kwargs.get("argnums", count())
vjps_dict = {
argnum: translate_vjp(vjpmaker, fun, argnum) for argnum, vjpmaker in zip(argnums, vjpmakers)
}
def vjp_argnums(argnums, ans, args, kwargs):
L = len(argnums)
# These first two cases are just optimizations
if L == 1:
argnum = argnums[0]
try:
vjpfun = vjps_dict[argnum]
except KeyError:
raise NotImplementedError(f"VJP of {fun.__name__} wrt argnum 0 not defined")
vjp = vjpfun(ans, *args, **kwargs)
return lambda g: (vjp(g),)
elif L == 2:
argnum_0, argnum_1 = argnums
try:
vjp_0_fun = vjps_dict[argnum_0]
vjp_1_fun = vjps_dict[argnum_1]
except KeyError:
raise NotImplementedError(f"VJP of {fun.__name__} wrt argnums 0, 1 not defined")
vjp_0 = vjp_0_fun(ans, *args, **kwargs)
vjp_1 = vjp_1_fun(ans, *args, **kwargs)
return lambda g: (vjp_0(g), vjp_1(g))
else:
vjps = [vjps_dict[argnum](ans, *args, **kwargs) for argnum in argnums]
return lambda g: (vjp(g) for vjp in vjps)
defvjp_argnums(fun, vjp_argnums)
def translate_vjp(vjpfun, fun, argnum):
if vjpfun is None:
return lambda ans, *args, **kwargs: lambda g: vspace(args[argnum]).zeros()
elif callable(vjpfun):
return vjpfun
else:
raise Exception(f"Bad VJP '{vjpfun}' for '{fun.__name__}'")
# -------------------- forward mode --------------------
def make_jvp(fun, x):
def jvp(g):
start_node = JVPNode.new_root(g)
end_value, end_node = trace(start_node, fun, x)
if end_node is None:
return end_value, vspace(end_value).zeros()
else:
return end_value, end_node.g
return jvp
|
VJPNode
|
python
|
Netflix__metaflow
|
metaflow/_vendor/click/types.py
|
{
"start": 8472,
"end": 10304
}
|
class ____(IntParamType):
"""A parameter that works similar to :data:`click.INT` but restricts
the value to fit into a range. The default behavior is to fail if the
value falls outside the range, but it can also be silently clamped
between the two edges.
See :ref:`ranges` for an example.
"""
name = "integer range"
def __init__(self, min=None, max=None, clamp=False):
self.min = min
self.max = max
self.clamp = clamp
def convert(self, value, param, ctx):
rv = IntParamType.convert(self, value, param, ctx)
if self.clamp:
if self.min is not None and rv < self.min:
return self.min
if self.max is not None and rv > self.max:
return self.max
if (
self.min is not None
and rv < self.min
or self.max is not None
and rv > self.max
):
if self.min is None:
self.fail(
"{} is bigger than the maximum valid value {}.".format(
rv, self.max
),
param,
ctx,
)
elif self.max is None:
self.fail(
"{} is smaller than the minimum valid value {}.".format(
rv, self.min
),
param,
ctx,
)
else:
self.fail(
"{} is not in the valid range of {} to {}.".format(
rv, self.min, self.max
),
param,
ctx,
)
return rv
def __repr__(self):
return "IntRange({}, {})".format(self.min, self.max)
|
IntRange
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/llama_index/vector_stores/vectorx/base.py
|
{
"start": 2360,
"end": 13934
}
|
class ____(BasePydanticVectorStore):
stores_text: bool = True
flat_metadata: bool = False
api_token: Optional[str]
encryption_key: Optional[str]
index_name: Optional[str]
space_type: Optional[str]
dimension: Optional[int]
insert_kwargs: Optional[Dict]
add_sparse_vector: bool
text_key: str
batch_size: int
remove_text_from_metadata: bool
_vectorx_index: Any = PrivateAttr()
def __init__(
self,
vectorx_index: Optional[Any] = None,
api_token: Optional[str] = None,
encryption_key: Optional[str] = None,
index_name: Optional[str] = None,
space_type: Optional[str] = "cosine",
dimension: Optional[int] = None,
insert_kwargs: Optional[Dict] = None,
add_sparse_vector: bool = False,
text_key: str = DEFAULT_TEXT_KEY,
batch_size: int = DEFAULT_BATCH_SIZE,
remove_text_from_metadata: bool = False,
**kwargs: Any,
) -> None:
insert_kwargs = insert_kwargs or {}
super().__init__(
index_name=index_name,
api_token=api_token,
encryption_key=encryption_key,
space_type=space_type,
dimension=dimension,
insert_kwargs=insert_kwargs,
add_sparse_vector=add_sparse_vector,
text_key=text_key,
batch_size=batch_size,
remove_text_from_metadata=remove_text_from_metadata,
)
# Use existing vectorx_index or initialize a new one
self._vectorx_index = vectorx_index or self._initialize_vectorx_index(
api_token, encryption_key, index_name, dimension, space_type
)
@classmethod
def _initialize_vectorx_index(
cls,
api_token: Optional[str],
encryption_key: Optional[str],
index_name: Optional[str],
dimension: Optional[int] = None,
space_type: Optional[str] = "cosine",
) -> Any:
"""Initialize VectorX index using the current API."""
try:
from vecx.vectorx import VectorX
except ImportError as e:
raise ImportError(
"Could not import `vecx` package. "
"Please install it with `pip install vecx`."
) from e
# Initialize VectorX client
vx = VectorX(token=api_token)
try:
# Try to get existing index
index = vx.get_index(name=index_name, key=encryption_key)
_logger.info(f"Retrieved existing index: {index_name}")
return index
except Exception as e:
if dimension is None:
raise ValueError(
"Must provide dimension when creating a new index"
) from e
# Create a new index if it doesn't exist
_logger.info(f"Creating new index: {index_name}")
vx.create_index(
name=index_name,
dimension=dimension,
key=encryption_key,
space_type=space_type,
)
return vx.get_index(name=index_name, key=encryption_key)
@classmethod
def from_params(
cls,
api_token: Optional[str] = None,
encryption_key: Optional[str] = None,
index_name: Optional[str] = None,
dimension: Optional[int] = None,
space_type: str = "cosine",
batch_size: int = DEFAULT_BATCH_SIZE,
) -> "VectorXVectorStore":
"""Create VectorXVectorStore from parameters."""
vectorx_index = cls._initialize_vectorx_index(
api_token, encryption_key, index_name, dimension, space_type
)
return cls(
vectorx_index=vectorx_index,
api_token=api_token,
encryption_key=encryption_key,
index_name=index_name,
dimension=dimension,
space_type=space_type,
batch_size=batch_size,
)
@classmethod
def class_name(cls) -> str:
return "VectorXVectorStore"
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
ids = []
entries = []
for node in nodes:
node_id = node.node_id
metadata = node_to_metadata_dict(node)
# Filter values must be simple key-value pairs
filter_data = {}
if "file_name" in metadata:
filter_data["file_name"] = metadata["file_name"]
if "doc_id" in metadata:
filter_data["doc_id"] = metadata["doc_id"]
if "category" in metadata:
filter_data["category"] = metadata["category"]
if "difficulty" in metadata:
filter_data["difficulty"] = metadata["difficulty"]
if "language" in metadata:
filter_data["language"] = metadata["language"]
if "field" in metadata:
filter_data["field"] = metadata["field"]
if "type" in metadata:
filter_data["type"] = metadata["type"]
if "feature" in metadata:
filter_data["feature"] = metadata["feature"]
entry = {
"id": node_id,
"vector": node.get_embedding(),
"meta": metadata,
"filter": filter_data,
}
ids.append(node_id)
entries.append(entry)
# Batch insert to avoid hitting API limits
batch_size = self.batch_size
for i in range(0, len(entries), batch_size):
batch = entries[i : i + batch_size]
self._vectorx_index.upsert(batch)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
try:
self._vectorx_index.delete_with_filter({"doc_id": ref_doc_id})
except Exception as e:
_logger.error(f"Error deleting vectors for doc_id {ref_doc_id}: {e}")
@property
def client(self) -> Any:
"""Return vectorX index client."""
return self._vectorx_index
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
    """
    Query index for top k most similar nodes.

    Args:
        query: VectorStoreQuery object containing query parameters
    """
    # Determine the vector dimension so a zero vector can be sent when the
    # caller provides no embedding.
    if not hasattr(self._vectorx_index, "dimension"):
        # Get dimension from index if available, otherwise try to infer from query
        try:
            dimension = self._vectorx_index.describe()["dimension"]
        except Exception:
            if query.query_embedding is not None:
                dimension = len(query.query_embedding)
            else:
                raise ValueError("Could not determine vector dimension")
    else:
        dimension = self._vectorx_index.dimension
    query_embedding = [0.0] * dimension  # Default empty vector
    filters = {}
    # Apply any metadata filters if provided. Filters accumulate into
    # {field: {op_symbol: value}} form.
    if query.filters is not None:
        for filter_item in query.filters.filters:
            # Case 1: MetadataFilter object
            if (
                hasattr(filter_item, "key")
                and hasattr(filter_item, "value")
                and hasattr(filter_item, "operator")
            ):
                op_symbol = reverse_operator_map.get(filter_item.operator)
                if not op_symbol:
                    raise ValueError(
                        f"Unsupported filter operator: {filter_item.operator}"
                    )
                if filter_item.key not in filters:
                    filters[filter_item.key] = {}
                filters[filter_item.key][op_symbol] = filter_item.value
            # Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
            elif isinstance(filter_item, dict):
                for key, op_dict in filter_item.items():
                    if isinstance(op_dict, dict):
                        for op, val in op_dict.items():
                            if key not in filters:
                                filters[key] = {}
                            filters[key][op] = val
            else:
                raise ValueError(f"Unsupported filter format: {filter_item}")
    _logger.info(f"Final structured filters: {filters}")
    # Use the query embedding if provided
    if query.query_embedding is not None:
        query_embedding = cast(List[float], query.query_embedding)
        if query.alpha is not None and query.mode == VectorStoreQueryMode.HYBRID:
            # Apply alpha scaling in hybrid mode
            query_embedding = [v * query.alpha for v in query_embedding]
    # Execute query; errors are logged and re-raised unchanged.
    try:
        results = self._vectorx_index.query(
            vector=query_embedding,
            top_k=query.similarity_top_k,
            filter=filters if filters else None,
            include_vectors=True,
        )
    except Exception as e:
        _logger.error(f"Error querying VectorX: {e}")
        raise
    # Process results into parallel nodes/similarities/ids lists.
    nodes = []
    similarities = []
    ids = []
    for result in results:
        node_id = result["id"]
        score = result["similarity"]
        # Get metadata from result
        metadata = result.get("meta", {})
        # Create node from metadata
        if self.flat_metadata:
            node = metadata_dict_to_node(
                metadata=metadata,
                text=metadata.pop(self.text_key, None),
                id_=node_id,
            )
        else:
            # Legacy (non-flat) metadata path.
            metadata_dict, node_info, relationships = legacy_metadata_dict_to_node(
                metadata=metadata,
                text_key=self.text_key,
            )
            # Create TextNode with the extracted metadata
            # Step 1: Get the JSON string from "_node_content"
            _node_content_str = metadata.get("_node_content", "{}")
            # Step 2: Convert JSON string to Python dict
            try:
                node_content = json.loads(_node_content_str)
            except json.JSONDecodeError:
                node_content = {}
            # Step 3: Get the text
            text = node_content.get(self.text_key, "")
            node = TextNode(
                text=text,
                metadata=metadata_dict,
                relationships=relationships,
                node_id=node_id,
            )
            # Add any node_info properties to the node
            for key, val in node_info.items():
                if hasattr(node, key):
                    setattr(node, key, val)
        # If embedding was returned in the results, add it to the node
        if "vector" in result:
            node.embedding = result["vector"]
        nodes.append(node)
        similarities.append(score)
        ids.append(node_id)
    return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
|
VectorXVectorStore
|
python
|
fluentpython__example-code-2e
|
21-async/mojifinder/bottle.py
|
{
"start": 73841,
"end": 76783
}
|
class ____(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Every value is stored in a list; the last element is the "current" one.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    # Plain indexing returns only the most recent value for the key.
    def __getitem__(self, key): return self.dict[key][-1]
    # Assignment appends a value; use replace() to overwrite all values.
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()

    # The iteration API is chosen at class-creation time based on the
    # module-level `py3k` flag (Python 3 vs Python 2 semantics).
    if py3k:
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems
    else:
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index, or failed conversion all yield default.
            pass
        return default

    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]

    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
|
MultiDict
|
python
|
pytorch__pytorch
|
test/dynamo/test_ctx_manager.py
|
{
"start": 45864,
"end": 47457
}
|
class ____(torch.nn.Module):
def forward(self):
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable('This is not supported'); _saved_tensors_hooks_disable = None
x: "f32[1]" = torch.ones(1)
y: "f32[1]" = torch.zeros(1)
add: "f32[1]" = x + y; x = y = None
_saved_tensors_hooks_disable_1 = torch._C._autograd._saved_tensors_hooks_disable('Previously disabled message'); _saved_tensors_hooks_disable_1 = None
return (add,)
""", # NOQA: B950
)
def test_disable_saved_tensors_hooks_prev_disabled_nested(self):
def fn(z):
@torch.autograd.graph.disable_saved_tensors_hooks("This is not supported")
def f(x, y):
@torch.autograd.graph.disable_saved_tensors_hooks(
"This is not supported inner"
)
def inner_fn(x, y):
return x + y
return inner_fn(x, y) + x
x, y = (
torch.ones(
1,
),
torch.zeros(
1,
),
)
return f(x, y)
eager = EagerAndRecordGraphs()
with torch.autograd.graph.disable_saved_tensors_hooks(
"Previously disabled message"
):
torch.compile(fn, backend=eager, fullgraph=True)(torch.randn(()))
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
django__django
|
django/db/models/functions/mixins.py
|
{
"start": 133,
"end": 929
}
|
class ____:
    """Mixin that casts float arguments to decimals on PostgreSQL.

    PostgreSQL does not provide LOG(double, double) or MOD(double, double)
    signatures, so every FloatField source expression is wrapped in a Cast
    to a high-precision DecimalField before the SQL is compiled.
    """

    def as_postgresql(self, compiler, connection, **extra_context):
        # High-precision target type shared by all recast expressions.
        target = DecimalField(decimal_places=sys.float_info.dig, max_digits=1000)
        recast = []
        for expression in self.get_source_expressions():
            if isinstance(expression.output_field, FloatField):
                recast.append(Cast(expression, target))
            else:
                recast.append(expression)
        clone = self.copy()
        clone.set_source_expressions(recast)
        return clone.as_sql(compiler, connection, **extra_context)
|
FixDecimalInputMixin
|
python
|
django__django
|
tests/admin_changelist/models.py
|
{
"start": 1858,
"end": 1891
}
|
class ____(Group):
    # Test model: a Group specialization with no additional fields.
    pass
|
Quartet
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-kyriba/source_kyriba/source.py
|
{
"start": 10435,
"end": 11775
}
|
class ____(AbstractSource):
    """Airbyte source for the Kyriba API."""

    def gateway_url(self, config: Mapping[str, Any]) -> str:
        """Build the API gateway base URL from the configured domain."""
        return f"https://{config['domain']}/gateway"

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """Validate credentials by logging in; return (ok, error_message)."""
        try:
            client = KyribaClient(config["username"], config["password"], self.gateway_url(config))
            client.login()
        except Exception as e:
            # A 401 almost always means bad credentials — give a friendlier hint.
            if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 401:
                return False, f"Please check your `username` and `password`. Error: {repr(e)}"
            return False, repr(e)
        return True, None

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate every stream with a shared client and date window."""
        gateway_url = self.gateway_url(config)
        client = KyribaClient(config["username"], config["password"], gateway_url)
        common = {
            "gateway_url": gateway_url,
            "client": client,
            "start_date": config.get("start_date"),
            "end_date": config.get("end_date"),
        }
        stream_classes = (
            Accounts,
            CashFlows,
            CashBalancesEod,
            CashBalancesIntraday,
            BankBalancesEod,
            BankBalancesIntraday,
        )
        return [stream_cls(**common) for stream_cls in stream_classes]
|
SourceKyriba
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/remote_origin.py
|
{
"start": 17540,
"end": 18523
}
|
class ____(LegacyNamedTupleMixin):
    """Serializable representation of an ExternalJob that can be used to
    uniquely identify it or reload it across process boundaries.
    """

    # Origin of the repository that owns this instigator.
    repository_origin: RemoteRepositoryOrigin
    instigator_name: str

    def get_selector(self) -> "InstigatorSelector":
        """Build an InstigatorSelector addressing this instigator."""
        # Local import — presumably to avoid a circular import; verify.
        from dagster._core.definitions.selector import InstigatorSelector

        return InstigatorSelector(
            location_name=self.repository_origin.code_location_origin.location_name,
            repository_name=self.repository_origin.repository_name,
            name=self.instigator_name,
        )

    def get_id(self) -> str:
        """Return a stable snapshot id derived from this origin's contents."""
        return create_snapshot_id(self)

    @property
    def location_name(self) -> str:
        # Convenience accessor for the code location's name.
        return self.repository_origin.code_location_origin.location_name
@whitelist_for_serdes(
storage_name="ExternalPartitionSetOrigin",
storage_field_names={"repository_origin": "external_repository_origin"},
)
@record
|
RemoteInstigatorOrigin
|
python
|
ethereum__web3.py
|
web3/providers/rpc/rpc.py
|
{
"start": 830,
"end": 6001
}
|
class ____(JSONBaseProvider):
    """HTTP JSON-RPC provider that POSTs requests to ``endpoint_uri``.

    Failed single requests are retried according to
    ``exception_retry_configuration``; batch requests are sent once and the
    responses re-ordered by request id.
    """

    logger = logging.getLogger("web3.providers.HTTPProvider")
    endpoint_uri = None
    _request_kwargs = None

    def __init__(
        self,
        endpoint_uri: URI | str | None = None,
        request_kwargs: Any | None = None,
        session: Any | None = None,
        exception_retry_configuration: None
        | (ExceptionRetryConfiguration | Empty) = empty,
        **kwargs: Any,
    ) -> None:
        """
        Args:
            endpoint_uri: RPC endpoint; falls back to the session manager's
                default HTTP endpoint when omitted.
            request_kwargs: Extra keyword arguments forwarded with each POST.
            session: Optional pre-built session, cached per endpoint.
            exception_retry_configuration: Retry policy; ``empty`` selects a
                lazily-built default (see the property below).
        """
        super().__init__(**kwargs)
        self._request_session_manager = HTTPSessionManager()
        if endpoint_uri is None:
            self.endpoint_uri = (
                self._request_session_manager.get_default_http_endpoint()
            )
        else:
            self.endpoint_uri = URI(endpoint_uri)
        self._request_kwargs = request_kwargs or {}
        self._exception_retry_configuration = exception_retry_configuration
        if session:
            self._request_session_manager.cache_and_return_session(
                self.endpoint_uri, session
            )

    def __str__(self) -> str:
        return f"RPC connection {self.endpoint_uri}"

    @property
    def exception_retry_configuration(self) -> ExceptionRetryConfiguration:
        # Build the default retry configuration lazily on first read.
        if isinstance(self._exception_retry_configuration, Empty):
            self._exception_retry_configuration = ExceptionRetryConfiguration(
                errors=(
                    ConnectionError,
                    requests.HTTPError,
                    requests.Timeout,
                )
            )
        return self._exception_retry_configuration

    @exception_retry_configuration.setter
    def exception_retry_configuration(
        self, value: ExceptionRetryConfiguration | Empty
    ) -> None:
        self._exception_retry_configuration = value

    @to_dict
    def get_request_kwargs(self) -> Iterable[tuple[str, Any]]:
        # Default headers are injected only when the caller set none.
        if "headers" not in self._request_kwargs:
            yield "headers", self.get_request_headers()
        yield from self._request_kwargs.items()

    @combomethod
    def get_request_headers(cls) -> dict[str, str]:
        """Return default headers; usable on the class or an instance."""
        if isinstance(cls, HTTPProvider):
            cls_name = cls.__class__.__name__
        else:
            cls_name = cls.__name__
        module = cls.__module__
        return {
            "Content-Type": "application/json",
            "User-Agent": construct_user_agent(module, cls_name),
        }

    def _make_request(self, method: RPCEndpoint, request_data: bytes) -> bytes:
        """
        If exception_retry_configuration is set, retry on failure; otherwise, make
        the request without retrying.
        """
        if (
            self.exception_retry_configuration is not None
            and check_if_retry_on_failure(
                method, self.exception_retry_configuration.method_allowlist
            )
        ):
            for i in range(self.exception_retry_configuration.retries):
                try:
                    return self._request_session_manager.make_post_request(
                        self.endpoint_uri, request_data, **self.get_request_kwargs()
                    )
                except tuple(self.exception_retry_configuration.errors) as e:
                    if i < self.exception_retry_configuration.retries - 1:
                        # Exponential backoff before the next attempt.
                        time.sleep(
                            self.exception_retry_configuration.backoff_factor * 2**i
                        )
                        continue
                    else:
                        raise e
            # Only reachable when retries <= 0; callers then receive None.
            return None
        else:
            return self._request_session_manager.make_post_request(
                self.endpoint_uri, request_data, **self.get_request_kwargs()
            )

    @handle_request_caching
    def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
        """Encode, send, and decode a single JSON-RPC request."""
        self.logger.debug(
            "Making request HTTP. URI: %s, Method: %s", self.endpoint_uri, method
        )
        request_data = self.encode_rpc_request(method, params)
        raw_response = self._make_request(method, request_data)
        response = self.decode_rpc_response(raw_response)
        self.logger.debug(
            "Getting response HTTP. URI: %s, Method: %s, Response: %s",
            self.endpoint_uri,
            method,
            response,
        )
        return response

    def make_batch_request(
        self, batch_requests: list[tuple[RPCEndpoint, Any]]
    ) -> list[RPCResponse] | RPCResponse:
        """Send a JSON-RPC batch; responses are re-ordered by request id."""
        self.logger.debug("Making batch request HTTP, uri: `%s`", self.endpoint_uri)
        request_data = self.encode_batch_rpc_request(batch_requests)
        raw_response = self._request_session_manager.make_post_request(
            self.endpoint_uri, request_data, **self.get_request_kwargs()
        )
        self.logger.debug("Received batch response HTTP.")
        response = self.decode_rpc_response(raw_response)
        if not isinstance(response, list):
            # RPC errors return only one response with the error object
            return response
        # Fix: the list was previously sorted twice (the sorter applied to its
        # own output); sorting by response id is idempotent, so sort once.
        return cast(
            list[RPCResponse], sort_batch_response_by_response_ids(response)
        )
|
HTTPProvider
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/drawing/test_drawing_chart01.py
|
{
"start": 368,
"end": 3229
}
|
class ____(unittest.TestCase):
    """
    Test assembling a complete Drawing file.
    """

    def test_assemble_xml_file(self):
        """Test writing a drawing with no cell data."""
        self.maxDiff = None

        fh = StringIO()
        drawing = Drawing()
        drawing._set_filehandle(fh)

        # [col_from, row_from, x1, y1, col_to, row_to, x2, y2, x_abs, y_abs]
        dimensions = [4, 8, 457200, 104775, 12, 22, 152400, 180975, 0, 0]

        drawing_object = DrawingInfo()
        drawing_object._drawing_type = DrawingTypes.CHART
        drawing_object._dimensions = dimensions
        drawing_object._width = 0
        drawing_object._height = 0
        drawing_object._description = None
        drawing_object._shape = None
        drawing_object._anchor = 1
        drawing_object._rel_index = 1
        drawing_object._url_rel_index = 0
        drawing_object._tip = None

        drawing._add_drawing_object(drawing_object)
        drawing.embedded = 1

        drawing._assemble_xml_file()

        # Expected XML, compared element-by-element via _xml_to_list.
        exp = _xml_to_list(
            """
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
                  <xdr:twoCellAnchor>
                    <xdr:from>
                      <xdr:col>4</xdr:col>
                      <xdr:colOff>457200</xdr:colOff>
                      <xdr:row>8</xdr:row>
                      <xdr:rowOff>104775</xdr:rowOff>
                    </xdr:from>
                    <xdr:to>
                      <xdr:col>12</xdr:col>
                      <xdr:colOff>152400</xdr:colOff>
                      <xdr:row>22</xdr:row>
                      <xdr:rowOff>180975</xdr:rowOff>
                    </xdr:to>
                    <xdr:graphicFrame macro="">
                      <xdr:nvGraphicFramePr>
                        <xdr:cNvPr id="2" name="Chart 1"/>
                        <xdr:cNvGraphicFramePr/>
                      </xdr:nvGraphicFramePr>
                      <xdr:xfrm>
                        <a:off x="0" y="0"/>
                        <a:ext cx="0" cy="0"/>
                      </xdr:xfrm>
                      <a:graphic>
                        <a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/chart">
                          <c:chart xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1"/>
                        </a:graphicData>
                      </a:graphic>
                    </xdr:graphicFrame>
                    <xdr:clientData/>
                  </xdr:twoCellAnchor>
                </xdr:wsDr>
            """
        )

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(exp, got)
|
TestAssembleDrawing
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/llama_dataset/base.py
|
{
"start": 1469,
"end": 1686
}
|
class ____(BaseModel):
    """Base llama dataset example class."""

    # Abstract read-only property: subclasses must override and return their
    # own class name; the base default is never used directly.
    @property
    @abstractmethod
    def class_name(self) -> str:
        """Class name."""
        return "BaseLlamaDataExample"
|
BaseLlamaDataExample
|
python
|
walkccc__LeetCode
|
solutions/769. Max Chunks To Make Sorted/769.py
|
{
"start": 0,
"end": 207
}
|
class ____:
    """LeetCode 769 — Max Chunks To Make Sorted.

    `arr` is a permutation of [0, n-1]; a chunk boundary can be placed after
    index i exactly when the running maximum of arr[0..i] equals i.
    """

    def maxChunksToSorted(self, arr: list[int]) -> int:
        chunks = 0
        running_max = -math.inf
        for index, value in enumerate(arr):
            if value > running_max:
                running_max = value
            if running_max == index:
                chunks += 1
        return chunks
|
Solution
|
python
|
docker__docker-py
|
tests/unit/models_images_test.py
|
{
"start": 210,
"end": 4155
}
|
class ____(unittest.TestCase):
    """Unit tests for the high-level image collection API, backed by a fake
    low-level client so no daemon is needed."""

    def test_build(self):
        client = make_fake_client()
        image = client.images.build()
        client.api.build.assert_called_with()
        client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
        assert isinstance(image, Image)
        assert image.id == FAKE_IMAGE_ID

    def test_get(self):
        client = make_fake_client()
        image = client.images.get(FAKE_IMAGE_ID)
        client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
        assert isinstance(image, Image)
        assert image.id == FAKE_IMAGE_ID

    def test_labels(self):
        client = make_fake_client()
        image = client.images.get(FAKE_IMAGE_ID)
        assert image.labels == {'bar': 'foo'}

    def test_list(self):
        client = make_fake_client()
        images = client.images.list(all=True)
        client.api.images.assert_called_with(all=True, name=None, filters=None)
        assert len(images) == 1
        assert isinstance(images[0], Image)
        assert images[0].id == FAKE_IMAGE_ID

    def test_load(self):
        client = make_fake_client()
        client.images.load('byte stream')
        client.api.load_image.assert_called_with('byte stream')

    def test_pull(self):
        client = make_fake_client()
        image = client.images.pull('test_image:test')
        client.api.pull.assert_called_with(
            'test_image', tag='test', all_tags=False, stream=True
        )
        client.api.inspect_image.assert_called_with('test_image:test')
        assert isinstance(image, Image)
        assert image.id == FAKE_IMAGE_ID

    def test_pull_tag_precedence(self):
        # An explicit tag= argument wins over a tag embedded in the name;
        # with neither, 'latest' is used.
        client = make_fake_client()
        image = client.images.pull('test_image:latest', tag='test')
        client.api.pull.assert_called_with(
            'test_image', tag='test', all_tags=False, stream=True
        )
        client.api.inspect_image.assert_called_with('test_image:test')

        image = client.images.pull('test_image')
        client.api.pull.assert_called_with(
            'test_image', tag='latest', all_tags=False, stream=True
        )
        client.api.inspect_image.assert_called_with('test_image:latest')
        assert isinstance(image, Image)
        assert image.id == FAKE_IMAGE_ID

    def test_pull_multiple(self):
        # all_tags=True returns the list of pulled images.
        client = make_fake_client()
        images = client.images.pull('test_image', all_tags=True)
        client.api.pull.assert_called_with(
            'test_image', tag='latest', all_tags=True, stream=True
        )
        client.api.images.assert_called_with(
            all=False, name='test_image', filters=None
        )
        client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
        assert len(images) == 1
        image = images[0]
        assert isinstance(image, Image)
        assert image.id == FAKE_IMAGE_ID

    def test_pull_with_stream_param(self):
        # Passing stream= to the high-level API should warn, not fail.
        client = make_fake_client()
        with warnings.catch_warnings(record=True) as w:
            client.images.pull('test_image', stream=True)

        assert len(w) == 1
        assert str(w[0].message).startswith(
            '`stream` is not a valid parameter'
        )

    def test_push(self):
        client = make_fake_client()
        client.images.push('foobar', insecure_registry=True)
        client.api.push.assert_called_with(
            'foobar',
            tag=None,
            insecure_registry=True
        )

    def test_remove(self):
        client = make_fake_client()
        client.images.remove('test_image')
        client.api.remove_image.assert_called_with('test_image')

    def test_search(self):
        client = make_fake_client()
        client.images.search('test')
        client.api.search.assert_called_with('test')

    def test_search_limit(self):
        client = make_fake_client()
        client.images.search('test', limit=5)
        client.api.search.assert_called_with('test', limit=5)
|
ImageCollectionTest
|
python
|
django__django
|
tests/many_to_many/models.py
|
{
"start": 619,
"end": 762
}
|
class ____(models.Manager):
    """Manager that hides articles whose headline marks them as deleted."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.exclude(headline="deleted")
|
NoDeletedArticleManager
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation_test.py
|
{
"start": 338,
"end": 2788
}
|
class ____(test_case.TestCase):
    """Tests that ragged bounding-box inputs are densified to a fixed
    max_boxes length, padding boxes with zeros and labels with -1."""

    def test_densify_ragged_bounding_boxes_batched(self):
        # Two samples with 2 and 1 boxes respectively.
        ragged_boxes = tf.ragged.constant(
            [
                [[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
                [[0.5, 0.5, 0.6, 0.6]],
            ],
            dtype=tf.float32,
        )
        ragged_labels = tf.ragged.constant(
            [
                [0, 1],
                [2],
            ],
            dtype=tf.int32,
        )
        bounding_boxes = {"boxes": ragged_boxes, "labels": ragged_labels}
        max_boxes = 3
        densified_data = validation.densify_bounding_boxes(
            bounding_boxes.copy(), is_batched=True, max_boxes=max_boxes
        )
        densified_boxes = densified_data["boxes"]
        densified_labels = densified_data["labels"]
        self.assertEqual(densified_boxes.shape, (2, max_boxes, 4))
        self.assertEqual(densified_labels.shape, (2, max_boxes))
        # Padding: zero boxes, -1 labels.
        expected_boxes = [
            [[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4], [0.0, 0.0, 0.0, 0.0]],
            [[0.5, 0.5, 0.6, 0.6], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
        ]
        expected_labels = [
            [0, 1, -1],
            [2, -1, -1],
        ]
        self.assertAllClose(densified_boxes, expected_boxes)
        self.assertAllEqual(densified_labels, expected_labels)

    def test_densify_ragged_bounding_boxes_unbatched(self):
        # Single sample (no batch dimension) with 2 boxes.
        ragged_boxes = tf.ragged.constant(
            [[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
            dtype=tf.float32,
        )
        ragged_labels = tf.ragged.constant([[0], [1]], dtype=tf.int32)
        bounding_boxes = {"boxes": ragged_boxes, "labels": ragged_labels}
        max_boxes = 4
        densified_data = validation.densify_bounding_boxes(
            bounding_boxes.copy(), is_batched=False, max_boxes=max_boxes
        )
        densified_boxes = densified_data["boxes"]
        densified_labels = densified_data["labels"]
        self.assertEqual(densified_boxes.shape, (max_boxes, 4))
        self.assertEqual(densified_labels.shape, (max_boxes, 1))
        expected_boxes = [
            [0.1, 0.1, 0.2, 0.2],
            [0.3, 0.3, 0.4, 0.4],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
        ]
        expected_labels = [[0], [1], [-1], [-1]]
        self.assertAllClose(densified_boxes, expected_boxes)
        self.assertAllEqual(densified_labels, expected_labels)
|
DensifyBoundingBoxesTest
|
python
|
huggingface__transformers
|
src/transformers/models/splinter/modeling_splinter.py
|
{
"start": 9099,
"end": 9809
}
|
class ____(nn.Module):
    """Transformer output sublayer: project the intermediate activations back
    to hidden size, apply dropout, then a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
# Copied from transformers.models.align.modeling_align.AlignTextLayer with AlignText->Splinter
|
SplinterOutput
|
python
|
pytorch__pytorch
|
test/quantization/fx/test_numeric_suite_fx.py
|
{
"start": 3523,
"end": 3866
}
|
class ____(nn.Module):
    """Linear layer expressed via the functional API with explicit
    weight/bias parameters (used for numeric-suite matching tests)."""

    def __init__(self) -> None:
        super().__init__()
        weight = torch.empty(4, 4)
        self.w1 = nn.Parameter(weight)
        self.b1 = nn.Parameter(torch.zeros(4))
        # Same default init nn.Linear uses for its weight.
        torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))

    def forward(self, x):
        return F.linear(x, self.w1, self.b1)
|
LinearFunctional
|
python
|
pappasam__jedi-language-server
|
tests/test_data/hover/somemodule.py
|
{
"start": 104,
"end": 276
}
|
# Fixture exercised by the hover-documentation tests; the docstring text below
# is test data and must not be edited.
class ____:
    """Class doc string for testing."""

    def some_method(self):
        """Method doc string for testing."""

    # Deliberately has no docstring, to test hover on an undocumented method.
    def some_method2(self):
        pass
|
SomeClass
|
python
|
huggingface__transformers
|
src/transformers/models/zoedepth/image_processing_zoedepth_fast.py
|
{
"start": 1526,
"end": 12149
}
|
class ____(BaseImageProcessorFast):
    """Fast (torch-based) image processor for ZoeDepth.

    Pipeline: rescale -> reflect-pad -> aspect-preserving multiple-of resize
    -> normalize. Also post-processes depth-estimator outputs, undoing the
    padding and optionally averaging with a horizontally-flipped pass.
    """

    # Defaults mirroring the slow processor's configuration.
    do_pad = True
    do_rescale = True
    do_normalize = True
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    do_resize = True
    size = {"height": 384, "width": 512}
    resample = PILImageResampling.BILINEAR
    keep_aspect_ratio = True
    ensure_multiple_of = 1 / 32
    valid_kwargs = ZoeDepthImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[ZoeDepthImageProcessorKwargs]) -> None:
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        **kwargs: Unpack[ZoeDepthImageProcessorKwargs],
    ) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def resize(
        self,
        images: "torch.Tensor",
        size: SizeDict,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        interpolation: Optional["F.InterpolationMode"] = None,
    ) -> "torch.Tensor":
        """
        Resize an image or batched images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
        is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
        set, the image is resized to a size that is a multiple of this value.

        Args:
            images (`torch.Tensor`):
                Images to resize.
            size (`dict[str, int]`):
                Target size of the output image.
            keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
                If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
            ensure_multiple_of (`int`, *optional*, defaults to 1):
                The image is resized to a size that is a multiple of this value.
            interpolation (`F.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
                specified in `size`.
        """
        if not size.height or not size.width:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size}")
        output_size = get_resize_output_image_size(
            images,
            output_size=(size.height, size.width),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
            input_data_format=ChannelDimension.FIRST,
        )
        height, width = output_size
        # align_corners=True matches the original (slow) implementation.
        resized_images = torch.nn.functional.interpolate(
            images, (int(height), int(width)), mode=interpolation.value, align_corners=True
        )
        return resized_images

    def _pad_images(
        self,
        images: "torch.Tensor",
    ):
        """
        Args:
            image (`torch.Tensor`):
                Image to pad.
        """
        height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)
        # Pad amount int(sqrt(dim/2) * 3) per side — see post-processing notes.
        pad_height = int(np.sqrt(height / 2) * 3)
        pad_width = int(np.sqrt(width / 2) * 3)
        return F.pad(images, padding=(pad_width, pad_height), padding_mode="reflect")

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        keep_aspect_ratio: Optional[bool],
        ensure_multiple_of: Optional[int],
        interpolation: Optional["F.InterpolationMode"],
        do_pad: bool,
        do_rescale: bool,
        rescale_factor: Optional[float],
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        disable_grouping: Optional[bool],
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Order matters: rescale -> pad -> resize -> normalize.
            if do_rescale:
                stacked_images = self.rescale(stacked_images, rescale_factor)
            if do_pad:
                stacked_images = self._pad_images(images=stacked_images)
            if do_resize:
                stacked_images = self.resize(
                    stacked_images, size, keep_aspect_ratio, ensure_multiple_of, interpolation
                )
            if do_normalize:
                stacked_images = self.normalize(stacked_images, image_mean, image_std)
            resized_images_grouped[shape] = stacked_images
        # Restore the caller's original image order.
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        processed_images = torch.stack(resized_images, dim=0) if return_tensors else resized_images
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def post_process_depth_estimation(
        self,
        outputs: "ZoeDepthDepthEstimatorOutput",
        source_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
        target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
        outputs_flipped: Optional[Union["ZoeDepthDepthEstimatorOutput", None]] = None,
        do_remove_padding: Optional[Union[bool, None]] = None,
    ) -> list[dict[str, TensorType]]:
        """
        Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`ZoeDepthDepthEstimatorOutput`]):
                Raw outputs of the model.
            source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size
                (height, width) of each image in the batch before preprocessing. This argument should be dealt as
                "required" unless the user passes `do_remove_padding=False` as input to this function.
            target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.
            outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
                Raw outputs of the model from flipped input (averaged out in the end).
            do_remove_padding (`bool`, *optional*):
                By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the
                boundary artifacts in the output depth map, so we need remove this padding during post_processing. The
                parameter exists here in case the user changed the image preprocessing to not include padding.

        Returns:
            `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        # Input validation before any tensor work.
        if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape):
            raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape")
        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )
        if do_remove_padding is None:
            do_remove_padding = self.do_pad
        if source_sizes is None and do_remove_padding:
            raise ValueError(
                "Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False"
            )
        if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)):
            raise ValueError(
                "Make sure that you pass in as many source image sizes as the batch dimension of the logits"
            )

        # Average with the horizontally-flipped prediction when provided.
        if outputs_flipped is not None:
            predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2

        predicted_depth = predicted_depth.unsqueeze(1)

        # Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map
        # The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width
        # fh (and fw respectively) are equal to '3' by default
        # Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57)
        # for the original implementation.
        # In this section, we remove this padding to get the final depth image and depth prediction
        padding_factor_h = padding_factor_w = 3

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes
        for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes):
            # depth.shape = [1, H, W]
            if source_size is not None:
                pad_h = pad_w = 0
                if do_remove_padding:
                    pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h)
                    pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w)
                # Resize to padded source size, then crop the pad margins off.
                depth = F.resize(
                    depth,
                    size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w],
                    interpolation=F.InterpolationMode.BICUBIC,
                    antialias=False,
                )
                if pad_h > 0:
                    depth = depth[:, pad_h:-pad_h, :]
                if pad_w > 0:
                    depth = depth[:, :, pad_w:-pad_w]
            if target_size is not None:
                target_size = [target_size[0], target_size[1]]
                depth = F.resize(
                    depth,
                    size=target_size,
                    interpolation=F.InterpolationMode.BICUBIC,
                    antialias=False,
                )
            depth = depth.squeeze(0)
            # depth.shape = [H, W]
            results.append({"predicted_depth": depth})

        return results
__all__ = ["ZoeDepthImageProcessorFast"]
|
ZoeDepthImageProcessorFast
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_cond_format06.py
|
{
"start": 315,
"end": 1280
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format(
{"pattern": 15, "fg_color": "#FF0000", "bg_color": "#FFFF00"}
)
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1",
{
"type": "cell",
"format": format1,
"criteria": ">",
"value": 7,
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/test_cloud_run.py
|
{
"start": 10286,
"end": 10956
}
|
class ____:
@pytest.mark.asyncio
async def test_get_operation(self):
hook = CloudRunAsyncHook()
hook.get_conn = mock.AsyncMock()
await hook.get_operation(operation_name=OPERATION_NAME)
hook.get_conn.return_value.get_operation.assert_called_once_with(
operations_pb2.GetOperationRequest(name=OPERATION_NAME), timeout=120
)
def mock_get_operation(self, expected_operation):
get_operation_mock = mock.AsyncMock()
get_operation_mock.return_value = expected_operation
return get_operation_mock
def _dummy_get_credentials(self):
pass
@pytest.mark.db_test
|
TestCloudRunAsyncHook
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypedDict2.py
|
{
"start": 1380,
"end": 1436
}
|
class ____(TypedDict):
name: Literal["A"]
a: str
|
A
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/cutedsl.py
|
{
"start": 339,
"end": 4453
}
|
class ____:
TILE_M: int = 128
TILE_N: int = 192
CLUSTER_M: int = 2
CLUSTER_N: int = 1
USE_2_CTA: bool = False
TENSORMAP_UPDATE_MODE: TensorMapUpdateMode = TensorMapUpdateMode.SMEM
def get_exhaustive_groupgemm_configs() -> list[CuTeGemmConfig]:
"""
Returns the exhaustive configuration set for the Blackwell CuTeDSL Grouped GEMM kernel.
For information regarding valid config sets, see:
https://github.com/NVIDIA/cutlass/blob/main/examples/python/CuTeDSL/blackwell/grouped_gemm.py
"""
# Tile_n is always the same regardless of 2cta
tile_n_vals = [32, 64, 96, 128, 160, 192, 224, 256]
# Valid clusters
clusters_no_2cta = [
(1, 1),
(1, 2),
(1, 4),
(1, 8),
(1, 16),
(2, 1),
(2, 2),
(2, 4),
(2, 8),
(4, 1),
(4, 2),
(4, 4),
(8, 1),
(8, 2),
(16, 1),
]
clusters_2cta = [
(2, 1),
(2, 2),
(2, 4),
(2, 8),
(4, 1),
(4, 2),
(4, 4),
(8, 1),
(8, 2),
(16, 1),
]
configs: list[CuTeGemmConfig] = []
for use_2cta, cluster_set, tile_m_range in [
(False, clusters_no_2cta, [64, 128]),
(True, clusters_2cta, [128, 256]),
]:
for tensormap_update_mode, tile_m, tile_n, (cluster_m, cluster_n) in product(
[TensorMapUpdateMode.SMEM, TensorMapUpdateMode.GMEM],
tile_m_range,
tile_n_vals,
cluster_set,
):
configs.append(
CuTeGemmConfig(
tile_m,
tile_n,
cluster_m,
cluster_n,
USE_2_CTA=use_2cta,
TENSORMAP_UPDATE_MODE=tensormap_update_mode,
)
)
return configs
def get_default_groupgemm_configs() -> list[CuTeGemmConfig]:
"""
Returns the default configuration set for the Blackwell CuTeDSL Grouped GEMM kernel.
"""
config_tuples = [
(128, 256, 2, 1, False, TensorMapUpdateMode.SMEM),
(256, 160, 2, 1, True, TensorMapUpdateMode.GMEM),
(256, 256, 2, 1, True, TensorMapUpdateMode.GMEM),
(64, 32, 1, 1, False, TensorMapUpdateMode.GMEM),
(64, 256, 1, 2, False, TensorMapUpdateMode.SMEM),
(128, 256, 1, 2, False, TensorMapUpdateMode.SMEM),
(256, 256, 2, 2, True, TensorMapUpdateMode.GMEM),
(128, 256, 1, 2, False, TensorMapUpdateMode.GMEM),
(64, 32, 1, 1, False, TensorMapUpdateMode.SMEM),
(256, 256, 2, 1, True, TensorMapUpdateMode.SMEM),
(128, 256, 1, 1, False, TensorMapUpdateMode.GMEM),
(256, 256, 8, 1, True, TensorMapUpdateMode.GMEM),
(64, 32, 1, 2, False, TensorMapUpdateMode.SMEM),
(256, 192, 2, 1, True, TensorMapUpdateMode.GMEM),
(256, 256, 2, 2, True, TensorMapUpdateMode.SMEM),
(128, 96, 1, 2, False, TensorMapUpdateMode.SMEM),
(64, 192, 1, 1, False, TensorMapUpdateMode.SMEM),
(64, 64, 1, 1, False, TensorMapUpdateMode.GMEM),
(64, 192, 1, 1, False, TensorMapUpdateMode.GMEM),
(128, 64, 1, 1, False, TensorMapUpdateMode.GMEM),
(64, 160, 1, 1, False, TensorMapUpdateMode.GMEM),
(64, 256, 1, 1, False, TensorMapUpdateMode.GMEM),
]
return [CuTeGemmConfig(*args) for args in config_tuples]
def get_groupgemm_configs() -> list[CuTeGemmConfig]:
"""
Returns the configuration set for the Blackwell CuTeDSL Grouped GEMM kernel.
Note: CuTeDSL autotuning is still experimental — enabling it may trigger kernel launch failures
or unstable results. By default, autotuning is disabled and we return only
a single baseline config.
"""
if (
config.cutedsl_enable_autotuning
and config.max_autotune_gemm_search_space == "EXHAUSTIVE"
):
return get_exhaustive_groupgemm_configs()
elif config.cutedsl_enable_autotuning:
return get_default_groupgemm_configs()
else:
return [get_default_groupgemm_configs()[0]]
|
CuTeGemmConfig
|
python
|
docker__docker-py
|
docker/types/services.py
|
{
"start": 19384,
"end": 19530
}
|
class ____:
_values = (
'none',
'on-failure',
'any',
)
NONE, ON_FAILURE, ANY = _values
|
RestartConditionTypesEnum
|
python
|
huggingface__transformers
|
src/transformers/models/switch_transformers/modular_switch_transformers.py
|
{
"start": 7865,
"end": 7932
}
|
class ____(T5DenseActDense):
pass
|
SwitchTransformersDenseActDense
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/_mathtext.py
|
{
"start": 54188,
"end": 55615
}
|
class ____(Hlist):
"""
A character as close to the given height and depth as possible.
When using a font with multiple height versions of some characters (such as
the BaKoMa fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c: str, height: float, depth: float, state: ParserState,
always: bool = False, factor: float | None = None):
alternatives = state.fontset.get_sized_alternatives_for_symbol(state.font, c)
x_height = state.fontset.get_xheight(state.font, state.fontsize, state.dpi)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
# Ensure that size 0 is chosen when the text is regular sized but
# with descender glyphs by subtracting 0.2 * x_height
if char.height + char.depth >= target_total - 0.2 * x_height:
break
shift = 0.0
if state.font != 0 or len(alternatives) == 1:
if factor is None:
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
super().__init__([char])
self.shift_amount = shift
|
AutoHeightChar
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_optimize04.py
|
{
"start": 315,
"end": 1089
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", "a", bold, "bc", "defg")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
huggingface__transformers
|
src/transformers/models/got_ocr2/modeling_got_ocr2.py
|
{
"start": 1911,
"end": 2427
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim)
self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size)
self.act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.lin1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.lin2(hidden_states)
return hidden_states
|
GotOcr2MLPBlock
|
python
|
celery__celery
|
t/unit/conftest.py
|
{
"start": 15985,
"end": 22727
}
|
class ____:
def __init__(self, monkeypatch, request):
self.monkeypatch = monkeypatch
self.request = request
def __getattr__(self, name):
return getattr(self.monkeypatch, name)
def __call__(self, path, value=SENTINEL, name=None,
new=MagicMock, **kwargs):
value = self._value_or_mock(value, new, name, path, **kwargs)
self.monkeypatch.setattr(path, value)
return value
def object(self, target, attribute, *args, **kwargs):
return _wrap_context(
patch.object(target, attribute, *args, **kwargs),
self.request)
def _value_or_mock(self, value, new, name, path, **kwargs):
if value is SENTINEL:
value = new(name=name or path.rpartition('.')[2])
for k, v in kwargs.items():
setattr(value, k, v)
return value
def setattr(self, target, name=SENTINEL, value=SENTINEL, **kwargs):
# alias to __call__ with the interface of pytest.monkeypatch.setattr
if value is SENTINEL:
value, name = name, None
return self(target, value, name=name)
def setitem(self, dic, name, value=SENTINEL, new=MagicMock, **kwargs):
# same as pytest.monkeypatch.setattr but default value is MagicMock
value = self._value_or_mock(value, new, name, dic, **kwargs)
self.monkeypatch.setitem(dic, name, value)
return value
def modules(self, *mods):
modules = []
for mod in mods:
mod = mod.split('.')
modules.extend(reversed([
'.'.join(mod[:-i] if i else mod) for i in range(len(mod))
]))
modules = sorted(set(modules))
return _wrap_context(module_context_manager(*modules), self.request)
def _wrap_context(context, request):
ret = context.__enter__()
def fin():
context.__exit__(*sys.exc_info())
request.addfinalizer(fin)
return ret
@pytest.fixture()
def patching(monkeypatch, request):
"""Monkeypath.setattr shortcut.
Example:
.. code-block:: python
>>> def test_foo(patching):
>>> # execv value here will be mock.MagicMock by default.
>>> execv = patching('os.execv')
>>> patching('sys.platform', 'darwin') # set concrete value
>>> patching.setenv('DJANGO_SETTINGS_MODULE', 'x.settings')
>>> # val will be of type mock.MagicMock by default
>>> val = patching.setitem('path.to.dict', 'KEY')
"""
return _patching(monkeypatch, request)
@contextmanager
def stdouts():
"""Override `sys.stdout` and `sys.stderr` with `StringIO`
instances.
>>> with conftest.stdouts() as (stdout, stderr):
... something()
... self.assertIn('foo', stdout.getvalue())
"""
prev_out, prev_err = sys.stdout, sys.stderr
prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
mystdout, mystderr = WhateverIO(), WhateverIO()
sys.stdout = sys.__stdout__ = mystdout
sys.stderr = sys.__stderr__ = mystderr
try:
yield mystdout, mystderr
finally:
sys.stdout = prev_out
sys.stderr = prev_err
sys.__stdout__ = prev_rout
sys.__stderr__ = prev_rerr
@contextmanager
def reset_modules(*modules):
"""Remove modules from :data:`sys.modules` by name,
and reset back again when the test/context returns.
Example::
>>> with conftest.reset_modules('celery.result', 'celery.app.base'):
... pass
"""
prev = {
k: sys.modules.pop(k) for k in modules if k in sys.modules
}
try:
for k in modules:
reload(import_module(k))
yield
finally:
sys.modules.update(prev)
def get_logger_handlers(logger):
return [
h for h in logger.handlers
if not isinstance(h, logging.NullHandler)
]
@contextmanager
def wrap_logger(logger, loglevel=logging.ERROR):
"""Wrap :class:`logging.Logger` with a StringIO() handler.
yields a StringIO handle.
Example::
>>> with conftest.wrap_logger(logger, loglevel=logging.DEBUG) as sio:
... ...
... sio.getvalue()
"""
old_handlers = get_logger_handlers(logger)
sio = WhateverIO()
siohandler = logging.StreamHandler(sio)
logger.handlers = [siohandler]
try:
yield sio
finally:
logger.handlers = old_handlers
@contextmanager
def _mock_context(mock):
context = mock.return_value = Mock()
context.__enter__ = Mock()
context.__exit__ = Mock()
def on_exit(*x):
if x[0]:
raise x[0] from x[1]
context.__exit__.side_effect = on_exit
context.__enter__.return_value = context
try:
yield context
finally:
context.reset()
@contextmanager
def open(side_effect=None):
"""Patch builtins.open so that it returns StringIO object.
:param side_effect: Additional side effect for when the open context
is entered.
Example::
>>> with mock.open(io.BytesIO) as open_fh:
... something_opening_and_writing_bytes_to_a_file()
... self.assertIn(b'foo', open_fh.getvalue())
"""
with patch('builtins.open') as open_:
with _mock_context(open_) as context:
if side_effect is not None:
context.__enter__.side_effect = side_effect
val = context.__enter__.return_value = WhateverIO()
val.__exit__ = Mock()
yield val
@contextmanager
def module_exists(*modules):
"""Patch one or more modules to ensure they exist.
A module name with multiple paths (e.g. gevent.monkey) will
ensure all parent modules are also patched (``gevent`` +
``gevent.monkey``).
Example::
>>> with conftest.module_exists('gevent.monkey'):
... gevent.monkey.patch_all = Mock(name='patch_all')
... ...
"""
gen = []
old_modules = []
for module in modules:
if isinstance(module, str):
module = types.ModuleType(module)
gen.append(module)
if module.__name__ in sys.modules:
old_modules.append(sys.modules[module.__name__])
sys.modules[module.__name__] = module
name = module.__name__
if '.' in name:
parent, _, attr = name.rpartition('.')
setattr(sys.modules[parent], attr, module)
try:
yield
finally:
for module in gen:
sys.modules.pop(module.__name__, None)
for module in old_modules:
sys.modules[module.__name__] = module
def _bind(f, o):
@wraps(f)
def bound_meth(*fargs, **fkwargs):
return f(o, *fargs, **fkwargs)
return bound_meth
|
_patching
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py
|
{
"start": 24624,
"end": 26895
}
|
class ____(SiglipPreTrainedModel):
config: Phi4MultimodalVisionConfig
base_model_prefix = "phi4_vision"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["Phi4MultimodalVisionEncoderLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_record_outputs = {
"hidden_states": Phi4MultimodalVisionEncoderLayer,
"attentions": Phi4MultimodalVisionAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Phi4MultimodalVisionEmbeddings):
width = (
self.config.hidden_size
if isinstance(self.config, Phi4MultimodalVisionConfig)
else self.config.hidden_size
)
init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
elif isinstance(module, nn.Embedding):
default_flax_embed_init(module.weight)
elif isinstance(module, Phi4MultimodalVisionAttention):
init.normal_(module.q_proj.weight)
init.normal_(module.k_proj.weight)
init.normal_(module.v_proj.weight)
init.normal_(module.out_proj.weight)
init.zeros_(module.q_proj.bias)
init.zeros_(module.k_proj.bias)
init.zeros_(module.v_proj.bias)
init.zeros_(module.out_proj.bias)
elif isinstance(module, Phi4MultimodalVisionMLP):
init.normal_(module.fc1.weight)
init.normal_(module.fc2.weight)
init.normal_(module.fc1.bias, std=1e-6)
init.normal_(module.fc2.bias, std=1e-6)
elif isinstance(module, Phi4MultimodalVisionMultiheadAttentionPoolingHead):
init.normal_(module.probe)
init.normal_(module.attention.in_proj_weight)
init.zeros_(module.attention.in_proj_bias)
elif isinstance(module, (nn.Linear, nn.Conv2d)):
lecun_normal_(module.weight)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
|
Phi4MultimodalVisionPreTrainedModel
|
python
|
walkccc__LeetCode
|
solutions/925. Long Pressed Name/925.py
|
{
"start": 0,
"end": 267
}
|
class ____:
def isLongPressedName(self, name: str, typed: str) -> bool:
i = 0
for j, t in enumerate(typed):
if i < len(name) and name[i] == t:
i += 1
elif j == 0 or t != typed[j - 1]:
return False
return i == len(name)
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/types/grouphash_metadata.py
|
{
"start": 4296,
"end": 4465
}
|
class ____(TemplateHashingMetadata, FingerprintHashingMetadata):
"""
Data from template-based bybrid fingerprinting
"""
pass
|
SaltedTemplateHashingMetadata
|
python
|
astropy__astropy
|
astropy/modeling/functional_models.py
|
{
"start": 90380,
"end": 92621
}
|
class ____(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
|
RickerWavelet2D
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/engine/cursor.py
|
{
"start": 50232,
"end": 52567
}
|
class ____(CursorFetchStrategy):
"""A cursor strategy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
after the database conversation can not be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
__slots__ = ("_rowbuffer", "alternate_cursor_description")
def __init__(
self,
dbapi_cursor: Optional[DBAPICursor],
alternate_description: Optional[_DBAPICursorDescription] = None,
initial_buffer: Optional[Iterable[Any]] = None,
):
self.alternate_cursor_description = alternate_description
if initial_buffer is not None:
self._rowbuffer = collections.deque(initial_buffer)
else:
assert dbapi_cursor is not None
self._rowbuffer = collections.deque(dbapi_cursor.fetchall())
def yield_per(
self, result: CursorResult[Any], dbapi_cursor: DBAPICursor, num: int
) -> Any:
pass
def soft_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
self._rowbuffer.clear()
super().soft_close(result, dbapi_cursor)
def hard_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
self._rowbuffer.clear()
super().hard_close(result, dbapi_cursor)
def fetchone(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
hard_close: bool = False,
) -> Any:
if self._rowbuffer:
return self._rowbuffer.popleft()
else:
result._soft_close(hard=hard_close)
return None
def fetchmany(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
size: Optional[int] = None,
) -> Any:
if size is None:
return self.fetchall(result, dbapi_cursor)
rb = self._rowbuffer
rows = [rb.popleft() for _ in range(min(size, len(rb)))]
if not rows:
result._soft_close()
return rows
def fetchall(
self, result: CursorResult[Any], dbapi_cursor: DBAPICursor
) -> Any:
ret = self._rowbuffer
self._rowbuffer = collections.deque()
result._soft_close()
return ret
|
FullyBufferedCursorFetchStrategy
|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_patches/_socket_patch.py
|
{
"start": 160,
"end": 252
}
|
class ____(int):
"""Wrapper around `int` to test if subclasses are accepted."""
|
_CustomInt
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_exclude_lists_audit.py
|
{
"start": 9340,
"end": 11540
}
|
class ____:
"""Test that audit functions call the validator with correct parameters."""
def test_audit_missing_public_calls_validator_correctly(self):
"""Test that _audit_exclude_missing_public calls validator with correct parameters."""
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = []
with patch("automation.dagster_docs.commands.check.EXCLUDE_MISSING_PUBLIC", set()):
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MODULES_FROM_PUBLIC_SCAN",
{"excluded.module"},
):
_audit_exclude_missing_public(mock_validator)
mock_validator.find_public_symbols.assert_called_once_with(
exclude_modules={"excluded.module"}
)
def test_audit_missing_rst_calls_validator_correctly(self):
"""Test that _audit_exclude_missing_rst calls validator with correct parameters."""
mock_validator = Mock()
mock_validator.find_rst_documented_symbols.return_value = []
with patch("automation.dagster_docs.commands.check.EXCLUDE_MISSING_RST", set()):
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_RST_FILES", {"excluded.rst"}
):
_audit_exclude_missing_rst(mock_validator)
mock_validator.find_rst_documented_symbols.assert_called_once_with(
exclude_files={"excluded.rst"}
)
def test_audit_missing_export_calls_validator_correctly(self):
"""Test that _audit_exclude_missing_export calls validator with correct parameters."""
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = []
with patch("automation.dagster_docs.commands.check.EXCLUDE_MISSING_EXPORT", set()):
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MODULES_FROM_PUBLIC_SCAN",
{"excluded.module"},
):
_audit_exclude_missing_export(mock_validator)
mock_validator.find_public_symbols.assert_called_once_with(
exclude_modules={"excluded.module"}
)
|
TestAuditFunctionsCallValidator
|
python
|
redis__redis-py
|
redis/asyncio/multidb/failover.py
|
{
"start": 1276,
"end": 1833
}
|
class ____(AsyncFailoverStrategy):
"""
Failover strategy based on database weights.
"""
def __init__(self):
self._databases = WeightedList()
async def database(self) -> AsyncDatabase:
for database, _ in self._databases:
if database.circuit.state == CBState.CLOSED:
return database
raise NoValidDatabaseException("No valid database available for communication")
def set_databases(self, databases: Databases) -> None:
self._databases = databases
|
WeightBasedFailoverStrategy
|
python
|
huggingface__transformers
|
tests/models/umt5/test_modeling_umt5.py
|
{
"start": 20988,
"end": 24315
}
|
class ____(unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
)
def test_small_integration_test(self):
"""
For comparison run the kaggle notebook available here : https://www.kaggle.com/arthurzucker/umt5-inference
"""
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
]
)
# fmt: on
torch.testing.assert_close(input_ids, EXPECTED_IDS)
generated_ids = model.generate(input_ids.to(torch_device))
EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
filling = tokenizer.batch_decode(generated_ids)
self.assertEqual(filling, EXPECTED_FILLING)
|
Umt5IntegrationTest
|
python
|
doocs__leetcode
|
solution/3300-3399/3381.Maximum Subarray Sum With Length Divisible by K/Solution.py
|
{
"start": 0,
"end": 297
}
|
class ____:
def maxSubarraySum(self, nums: List[int], k: int) -> int:
f = [inf] * k
ans = -inf
s = f[-1] = 0
for i, x in enumerate(nums):
s += x
ans = max(ans, s - f[i % k])
f[i % k] = min(f[i % k], s)
return ans
|
Solution
|
python
|
ansible__ansible
|
lib/ansible/executor/module_common.py
|
{
"start": 41211,
"end": 41550
}
|
class ____:
"""Payload required to execute an Ansible module, along with information required to do so."""
b_module_data: bytes
module_style: t.Literal['binary', 'new', 'non_native_want_json', 'old']
shebang: str | None
serialization_profile: str
@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
|
_BuiltModule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/feature_column_v2_test.py
|
{
"start": 2576,
"end": 8005
}
|
class ____(test.TestCase, parameterized.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=embedding_dimension)
# Can't test default initializer as it's a random function.
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_initializer', embedding_column.initializer())
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_deepcopy(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=2)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_column.dimension,
embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
def test_with_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
# With default scope validation, the same column cannot be used in a new
# variable scope.
with self.assertRaisesRegex(ValueError,
'the variable scope name is different'):
embedding_column.create_state(state_manager)
def test_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=True)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
embedding_column.create_state(state_manager)
def test_deepcopy_with_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
use_safe_embedding_lookup=False,
bypass_scope_validation=True)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_dimension, embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
self.assertTrue(embedding_column_copy._bypass_scope_validation)
self.assertFalse(embedding_column_copy.use_safe_embedding_lookup)
|
EmbeddingColumnTestV2
|
python
|
wandb__wandb
|
tools/local_wandb_server.py
|
{
"start": 7757,
"end": 7896
}
|
class ____(pydantic.BaseModel):
servers: dict[str, _ServerInfo] = {}
"""Map from server names to information about them."""
|
_InfoFile
|
python
|
pytorch__pytorch
|
test/test_custom_ops.py
|
{
"start": 169603,
"end": 176476
}
|
class ____(TestCase):
def get_sample_op_profile(self, opname) -> dict[str, set[OpProfile]]:
return {
opname: {
OpProfile(
args_profile=(
TensorMetadata(
rank=2,
dtype=torch.float32,
device=torch.device("cpu"),
layout=torch.strided,
),
TensorMetadata(
rank=2,
dtype=torch.float32,
device=torch.device("cpu"),
layout=torch.strided,
),
),
out_profile=TensorMetadata(
rank=2,
dtype=torch.float32,
device=torch.device("cpu"),
layout=torch.strided,
),
)
}
}
def test_fake_registration(self):
fm = torch._subclasses.FakeTensorMode(
shape_env=ShapeEnv(allow_dynamic_output_shape_ops=True)
)
t1 = fm.from_tensor(torch.ones(3, 3))
t2 = fm.from_tensor(torch.ones(3, 3))
op_profiles = self.get_sample_op_profile("mylib.foo2.default")
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
torch.library.define(
"mylib::foo2",
"(Tensor a, Tensor b) -> Tensor",
tags=torch.Tag.pt2_compliant_tag,
lib=lib,
)
@torch.library.impl("mylib::foo2", "cpu", lib=lib)
def foo_impl(a, b):
return a + b
with (
self.assertRaisesRegex(
torch._subclasses.fake_tensor.UnsupportedOperatorException,
"mylib.foo2.default",
),
fm,
):
torch.ops.mylib.foo2(t1, t2)
with (
torch._library.fake_profile.unsafe_generate_fake_kernels(op_profiles),
fm,
):
torch.ops.mylib.foo2(t1, t2)
with self.assertRaisesRegex(MissingOpProfile, "mylib::foo2"):
torch.ops.mylib.foo2(torch.ones(3, 3, 3), torch.ones(3, 3, 3))
with (
self.assertRaisesRegex(
torch._subclasses.fake_tensor.UnsupportedOperatorException,
"mylib.foo2.default",
),
fm,
):
torch.ops.mylib.foo2(t1, t2)
def test_duplicate_registration_impl(self):
fm = torch._subclasses.FakeTensorMode(
shape_env=ShapeEnv(allow_dynamic_output_shape_ops=True)
)
t1 = fm.from_tensor(torch.ones(3, 3))
t2 = fm.from_tensor(torch.ones(3, 3))
op_profiles = self.get_sample_op_profile("mylib.foo3.default")
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
torch.library.define(
"mylib::foo3",
"(Tensor a, Tensor b) -> Tensor",
tags=torch.Tag.pt2_compliant_tag,
lib=lib,
)
@torch.library.impl("mylib::foo3", "cpu", lib=lib)
def foo3_impl(a, b):
return a + b
@torch.library.register_fake("mylib::foo3", lib=lib)
def foo3_impl_fake(a, b):
return (a + b).to(dtype=torch.bfloat16)
with fm:
self.assertEqual(torch.ops.mylib.foo3(t1, t2).dtype, torch.bfloat16)
with torch._library.fake_profile.unsafe_generate_fake_kernels(op_profiles):
with fm:
self.assertEqual(torch.ops.mylib.foo3(t1, t2).dtype, torch.float32)
with fm:
self.assertEqual(torch.ops.mylib.foo3(t1, t2).dtype, torch.bfloat16)
def test_duplicate_registration_custom_op(self):
fm = torch._subclasses.FakeTensorMode(
shape_env=ShapeEnv(allow_dynamic_output_shape_ops=True)
)
t1 = fm.from_tensor(torch.ones(3, 3))
t2 = fm.from_tensor(torch.ones(3, 3))
op_profiles = self.get_sample_op_profile("mylib.foo1.default")
@torch.library.custom_op("mylib::foo1", mutates_args=())
def foo_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
@torch.library.register_fake("mylib::foo1")
def foo_impl_fake(a, b):
return torch.empty_like(a, dtype=torch.bfloat16)
with fm:
self.assertEqual(torch.ops.mylib.foo1(t1, t2).dtype, torch.bfloat16)
with torch._library.fake_profile.unsafe_generate_fake_kernels(op_profiles):
with fm:
self.assertEqual(torch.ops.mylib.foo1(t1, t2).dtype, torch.float32)
with fm:
self.assertEqual(torch.ops.mylib.foo1(t1, t2).dtype, torch.bfloat16)
def test_yaml(self):
op_profiles = self.get_sample_op_profile("mylib.foo.default")
yaml_str = generate_yaml_from_profiles(op_profiles)
loaded = read_profiles_from_yaml(yaml_str)
self.assertEqual(op_profiles, loaded)
@unittest.skipIf(IS_WINDOWS, "Windows not supported for this test")
def test_save_to_file(self):
op_profile = self.get_sample_op_profile("mylib.foo.default")
# Saving with buffer
buffer = io.BytesIO()
save_op_profiles(op_profile, buffer)
buffer.seek(0)
loaded = load_op_profiles(buffer)
self.assertEqual(op_profile, loaded)
# Saving with file
with tempfile.NamedTemporaryFile() as f:
save_op_profiles(op_profile, f.name)
f.seek(0)
loaded = load_op_profiles(f.name)
self.assertEqual(op_profile, loaded)
# Saving with Path
with TemporaryFileName() as fname:
path = Path(fname)
save_op_profiles(op_profile, path)
loaded = load_op_profiles(path)
self.assertEqual(op_profile, loaded)
def test_version(self):
op_profiles = self.get_sample_op_profile("mylib.foo.default")
yaml_str = generate_yaml_from_profiles(op_profiles)
loaded = yaml.safe_load(yaml_str)
loaded["torch_version"] = "2.7"
yaml_str = yaml.dump(loaded, sort_keys=False)
with self.assertRaisesRegex(RuntimeError, "Unable to load outdated profile"):
loaded = read_profiles_from_yaml(yaml_str)
only_for = ("cpu", "cuda", "xpu")
instantiate_device_type_tests(
TestCustomOpTesting, globals(), only_for=only_for, allow_xpu=True
)
instantiate_parametrized_tests(TestCustomOp)
instantiate_parametrized_tests(TestCustomOpAPI)
if __name__ == "__main__":
run_tests()
|
TestOpProfiles
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/completion/base.py
|
{
"start": 10745,
"end": 11053
}
|
class ____(Completer):
"""
A completer that doesn't return any completion.
"""
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
return []
def __repr__(self) -> str:
return "DummyCompleter()"
|
DummyCompleter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/types/core.py
|
{
"start": 2318,
"end": 2581
}
|
class ____(Tensor):
"""Tensor that can be associated with a value (aka "eager tensor").
These objects represent the (usually future) output of executing an op
immediately.
"""
def numpy(self):
pass
@tf_export("types.experimental.FunctionType")
|
Value
|
python
|
google__flatbuffers
|
python/flatbuffers/builder.py
|
{
"start": 1695,
"end": 1837
}
|
class ____(RuntimeError):
"""Error caused by causing a Builder to exceed the hardcoded limit of 2
gigabytes.
"""
pass
|
BuilderSizeError
|
python
|
getsentry__sentry
|
src/sentry/deletions/defaults/querysubscription.py
|
{
"start": 133,
"end": 1138
}
|
class ____(ModelDeletionTask[QuerySubscription]):
def delete_instance(self, instance: QuerySubscription) -> None:
from sentry.incidents.models.incident import Incident
# Clear the foreign key as the schema was created without a cascade clause
Incident.objects.filter(subscription_id=instance.id).update(subscription_id=None)
super().delete_instance(instance)
def get_child_relations(self, instance: QuerySubscription) -> list[BaseRelation]:
from sentry.incidents.models.alert_rule import AlertRule
from sentry.snuba.models import SnubaQuery
if not AlertRule.objects_with_snapshots.filter(
snuba_query_id=instance.snuba_query_id
).exists():
if (
QuerySubscription.objects.filter(snuba_query_id=instance.snuba_query_id).count()
== 1
):
return [ModelRelation(SnubaQuery, {"id": instance.snuba_query_id})]
return []
|
QuerySubscriptionDeletionTask
|
python
|
huggingface__transformers
|
src/transformers/models/roformer/modeling_roformer.py
|
{
"start": 39001,
"end": 43475
}
|
class ____(RoFormerPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "roformer.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RoFormerForCausalLM` as a standalone, add `is_decoder=True.`")
self.roformer = RoFormerModel(config)
self.cls = RoFormerOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[CausalLMOutputWithCrossAttentions, tuple[torch.Tensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RoFormerForCausalLM, RoFormerConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("junnyu/roformer_chinese_base")
>>> config = RoFormerConfig.from_pretrained("junnyu/roformer_chinese_base")
>>> config.is_decoder = True
>>> model = RoFormerForCausalLM.from_pretrained("junnyu/roformer_chinese_base", config=config)
>>> inputs = tokenizer("今天天气非常好。", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.cls(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
|
RoFormerForCausalLM
|
python
|
wandb__wandb
|
wandb/sdk/lib/wb_logging.py
|
{
"start": 4139,
"end": 4896
}
|
class ____:
"""Filters out messages logged for a different run."""
def __init__(self, run_id: str) -> None:
"""Create a _RunIDFilter.
Args:
run_id: Allows messages when the run ID is this or None.
"""
self._run_id = run_id
def filter(self, record: logging.LogRecord) -> bool:
"""Modify a log record and return whether it matches the run."""
run_id = _run_id.get()
if run_id is None:
record.run_id_tag = " [no run ID]"
return True
elif isinstance(run_id, _NotRunSpecific):
record.run_id_tag = " [all runs]"
return True
else:
record.run_id_tag = ""
return run_id == self._run_id
|
_RunIDFilter
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/metrics.py
|
{
"start": 1310,
"end": 1798
}
|
class ____(StrEnum):
"""Reasons why sentry app webhooks can fail"""
# Preparation fail
MISSING_SENTRY_APP = "missing_sentry_app"
MISSING_INSTALLATION = "missing_installation"
MISSING_EVENT = "missing_event"
INVALID_EVENT = "invalid_event"
MISSING_SERVICEHOOK = "missing_servicehook"
EVENT_NOT_IN_SERVCEHOOK = "event_not_in_servicehook"
MISSING_ISSUE_OCCURRENCE = "missing_issue_occurrence"
MISSING_USER = "missing_user"
|
SentryAppWebhookFailureReason
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.