language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | davidhalter__jedi | test/completion/async_.py | {
"start": 391,
"end": 529
} | class ____():
@staticmethod
async def b(c=1, d=2):
return 1
#! 9 ['def b']
await A.b()
#! 11 ['param d=2']
await A.b(d=3)
| A |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events.py | {
"start": 253913,
"end": 288402
} | class ____(OrganizationEventsEndpointTestBase):
def test_status(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
},
project_id=self.project.id,
).group
group_2 = self.store_event(
data={
"event_id": "b" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2"],
},
project_id=self.project.id,
).group
group_3 = self.store_event(
data={
"event_id": "c" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3"],
},
project_id=self.project.id,
).group
query = {
"field": ["count()"],
"statsPeriod": "2h",
"query": "status:unresolved",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert response.data["data"][0]["count()"] == 3
group_2.status = GroupStatus.IGNORED
group_2.substatus = GroupSubStatus.FOREVER
group_2.save(update_fields=["status", "substatus"])
group_3.status = GroupStatus.IGNORED
group_3.substatus = GroupSubStatus.FOREVER
group_3.save(update_fields=["status", "substatus"])
# XXX: Snuba caches query results, so change the time period so that the query
# changes enough to bust the cache.
query["statsPeriod"] = "3h"
response = self.do_request(query)
assert response.status_code == 200, response.content
assert response.data["data"][0]["count()"] == 1
def test_error_upsampling_with_allowlisted_project(self) -> None:
"""Test that count() is upsampled for allowlisted projects when querying error events."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "a" * 32,
"message": "Error event for upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "a1" * 16,
"message": "Error event without sampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset
query = {
"field": ["count()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect the count to be upsampled (1 event / 0.1 = 10) + 1 event with no sampling = 11
assert response.data["data"][0]["count()"] == 11
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "count()" in meta["fields"]
assert meta["fields"]["count()"] == "integer"
def test_error_upsampling_eps_with_allowlisted_project(self) -> None:
"""Test that eps() is upsampled for allowlisted projects when querying error events."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "b" * 32,
"message": "Error event for eps upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "b1" * 16,
"message": "Error event without sampling for eps",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset - eps() should be upsampled
query = {
"field": ["eps()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect eps to be upsampled (10 events / 7200 seconds) + (1 event / 7200 seconds) = 11/7200
# Since we have 1 event upsampled to 10 + 1 event with no sampling over 2 hour period
expected_eps = 11 / 7200
actual_eps = response.data["data"][0]["eps()"]
assert abs(actual_eps - expected_eps) < 0.0001 # Allow small rounding differences
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "eps()" in meta["fields"]
assert meta["fields"]["eps()"] == "rate"
assert meta["units"]["eps()"] == "1/second"
def test_error_upsampling_epm_with_allowlisted_project(self) -> None:
"""Test that epm() is upsampled for allowlisted projects when querying error events."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "c" * 32,
"message": "Error event for epm upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "c1" * 16,
"message": "Error event without sampling for epm",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset - epm() should be upsampled
query = {
"field": ["epm()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect epm to be upsampled (10 events / 120 minutes) + (1 event / 120 minutes) = 11/120
# Since we have 1 event upsampled to 10 + 1 event with no sampling over 2 hour period
expected_epm = 11 / 120
actual_epm = response.data["data"][0]["epm()"]
assert abs(actual_epm - expected_epm) < 0.001 # Allow small rounding differences
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "epm()" in meta["fields"]
assert meta["fields"]["epm()"] == "rate"
assert meta["units"]["epm()"] == "1/minute"
def test_error_upsampling_with_no_allowlist(self) -> None:
"""Test that count() is not upsampled when project is not allowlisted."""
# No allowlisted projects
with self.options({"issues.client_error_sampling.project_allowlist": []}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "a" * 32,
"message": "Error event for upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Test with errors dataset
query = {
"field": ["count()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect the count to remain as-is (no upsampling)
assert response.data["data"][0]["count()"] == 1
def test_error_upsampling_with_partial_allowlist(self) -> None:
"""Test that count() is upsampled when any project in the query is allowlisted."""
# Create a second project
project2 = self.create_project(organization=self.organization)
# Only allowlist the first project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error events in both projects
self.store_event(
data={
"event_id": "a" * 32,
"message": "Error event for upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "Error event for upsampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=project2.id,
)
# Test with errors dataset, querying both projects
query = {
"field": ["count()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
"project": [self.project.id, project2.id],
}
features = {"organizations:discover-basic": True}
response = self.do_request(query, features=features)
assert response.status_code == 200, response.content
# Expect upsampling since any project is allowlisted (both events upsampled: 10 + 10 = 20)
assert response.data["data"][0]["count()"] == 20
def test_sample_count_with_allowlisted_project(self) -> None:
"""Test that sample_count() returns raw sample count (not upsampled) for allowlisted projects."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "a" * 32,
"message": "Error event for sample_count",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "a1" * 16,
"message": "Error event without sampling",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset - sample_count() should return raw count, not upsampled
query = {
"field": ["sample_count()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect sample_count to return raw count: 2 events (not upsampled 11)
assert response.data["data"][0]["sample_count()"] == 2
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "sample_count()" in meta["fields"]
assert meta["fields"]["sample_count()"] == "integer"
def test_sample_eps_with_allowlisted_project(self) -> None:
"""Test that sample_eps() returns raw sample rate (not upsampled) for allowlisted projects."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "b" * 32,
"message": "Error event for sample_eps",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "b1" * 16,
"message": "Error event without sampling for sample_eps",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset - sample_eps() should return raw rate, not upsampled
query = {
"field": ["sample_eps()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect sample_eps to return raw rate: 2 events / 7200 seconds = 2/7200
expected_sample_eps = 2 / 7200
actual_sample_eps = response.data["data"][0]["sample_eps()"]
assert (
abs(actual_sample_eps - expected_sample_eps) < 0.0001
) # Allow small rounding differences
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "sample_eps()" in meta["fields"]
assert meta["fields"]["sample_eps()"] == "rate"
assert meta["units"]["sample_eps()"] == "1/second"
def test_sample_epm_with_allowlisted_project(self) -> None:
"""Test that sample_epm() returns raw sample rate (not upsampled) for allowlisted projects."""
# Set up allowlisted project
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store error event with error_sampling context
self.store_event(
data={
"event_id": "c" * 32,
"message": "Error event for sample_epm",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
# Store error event without error_sampling context (sample_weight = null should count as 1)
self.store_event(
data={
"event_id": "c1" * 16,
"message": "Error event without sampling for sample_epm",
"type": "error",
"exception": [{"type": "ValueError", "value": "Something else went wrong"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3_no_sampling"],
},
project_id=self.project.id,
)
# Test with errors dataset - sample_epm() should return raw rate, not upsampled
query = {
"field": ["sample_epm()"],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
# Expect sample_epm to return raw rate: 2 events / 120 minutes = 2/120
expected_sample_epm = 2 / 120
actual_sample_epm = response.data["data"][0]["sample_epm()"]
assert (
abs(actual_sample_epm - expected_sample_epm) < 0.001
) # Allow small rounding differences
# Check meta information
meta = response.data["meta"]
assert "fields" in meta
assert "sample_epm()" in meta["fields"]
assert meta["fields"]["sample_epm()"] == "rate"
assert meta["units"]["sample_epm()"] == "1/minute"
def test_sort_upsampled_columns(self) -> None:
# Create two issues/groups where raw vs upsampled metrics imply different orderings
# A: 1 sampled event with client_sample_rate=0.1 -> upsampled_count=10, raw=1
# B: 2 unsampled events -> upsampled_count=2, raw=2
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
event_a = self.store_event(
data={
"event_id": "a" * 32,
"message": "Sampled event A",
"type": "error",
"exception": [{"type": "ValueError", "value": "x"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["upsampling_group_a"],
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
},
project_id=self.project.id,
)
event_b1 = self.store_event(
data={
"event_id": "b" * 32,
"message": "Unsampled event B1",
"type": "error",
"exception": [{"type": "ValueError", "value": "y"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["upsampling_group_b"],
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"message": "Unsampled event B2",
"type": "error",
"exception": [{"type": "ValueError", "value": "z"}],
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["upsampling_group_b"],
},
project_id=self.project.id,
)
group_a_id = event_a.group.id
group_b_id = event_b1.group.id
# Test all 6 upsampling fields with sorting
# A: 1 sampled event with rate 0.1 -> upsampled=10, raw=1
# B: 2 unsampled events -> upsampled=2, raw=2
# Test all 6 upsampling fields with sorting - all work!
test_cases = [
# (field, sort_field, expected_a_first) - True if A should come first in desc order
("count()", "count", True), # upsampled: A=10 > B=2
("eps()", "eps", True), # upsampled: A=10/120 > B=2/120
("epm()", "epm", True), # upsampled: A=10/2 > B=2/2
("sample_count()", "sample_count", False), # raw: A=1 < B=2
("sample_eps()", "sample_eps", False), # raw: A=1/120 < B=2/120
("sample_epm()", "sample_epm", False), # raw: A=1/2 < B=2/2
]
for field, sort_field, expected_a_first in test_cases:
query = {
"field": ["issue.id", field],
"statsPeriod": "2h",
"query": "event.type:error",
"dataset": "errors",
"sort": f"-{sort_field}",
"per_page": 10,
}
response = self.do_request(query)
assert response.status_code == 200, f"Field {field} failed: {response.content}"
data = response.data["data"]
assert len(data) >= 2
if expected_a_first:
# A should come first (higher upsampled values)
assert (
data[0]["issue.id"] == group_a_id
), f"Field {field}: Expected group A ({group_a_id}) first, but got {data[0]['issue.id']}"
assert (
data[1]["issue.id"] == group_b_id
), f"Field {field}: Expected group B ({group_b_id}) second, but got {data[1]['issue.id']}"
else:
# B should come first (higher raw values)
assert (
data[0]["issue.id"] == group_b_id
), f"Field {field}: Expected group B ({group_b_id}) first, but got {data[0]['issue.id']}"
assert (
data[1]["issue.id"] == group_a_id
), f"Field {field}: Expected group A ({group_a_id}) second, but got {data[1]['issue.id']}"
def test_is_status(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
},
project_id=self.project.id,
).group
group_2 = self.store_event(
data={
"event_id": "b" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group2"],
},
project_id=self.project.id,
).group
group_3 = self.store_event(
data={
"event_id": "c" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group3"],
},
project_id=self.project.id,
).group
query = {
"field": ["count()"],
"statsPeriod": "2h",
"query": "is:unresolved",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert response.data["data"][0]["count()"] == 3
group_2.status = GroupStatus.IGNORED
group_2.substatus = GroupSubStatus.FOREVER
group_2.save(update_fields=["status", "substatus"])
group_3.status = GroupStatus.IGNORED
group_3.substatus = GroupSubStatus.FOREVER
group_3.save(update_fields=["status", "substatus"])
# XXX: Snuba caches query results, so change the time period so that the query
# changes enough to bust the cache.
query["statsPeriod"] = "3h"
response = self.do_request(query)
assert response.status_code == 200, response.content
assert response.data["data"][0]["count()"] == 1
def test_short_group_id(self) -> None:
group_1 = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
},
project_id=self.project.id,
).group
query = {
"field": ["count()"],
"statsPeriod": "1h",
"query": f"project:{group_1.project.slug} issue:{group_1.qualified_short_id}",
"dataset": "errors",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert response.data["data"][0]["count()"] == 1
def test_user_display(self) -> None:
group_1 = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"user": {
"email": "hellboy@bar.com",
},
},
project_id=self.project.id,
).group
features = {
"organizations:discover-basic": True,
}
query = {
"field": ["user.display"],
"query": f"user.display:hell* issue.id:{group_1.id}",
"statsPeriod": "24h",
"dataset": "errors",
}
response = self.do_request(query, features=features)
assert response.status_code == 200, response.content
data = response.data["data"]
assert len(data) == 1
result = {r["user.display"] for r in data}
assert result == {"hellboy@bar.com"}
def test_performance_score(self) -> None:
self.transaction_data["measurements"] = {
"score.lcp": {"value": 0.03},
"score.weight.lcp": {"value": 0.3},
}
self.store_event(self.transaction_data, self.project.id)
self.transaction_data["measurements"] = {
"score.lcp": {"value": 1.0},
"score.weight.lcp": {"value": 1.0},
}
self.store_event(self.transaction_data, self.project.id)
query = {
"field": [
"performance_score(measurements.score.lcp)",
]
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0] == {
"performance_score(measurements.score.lcp)": 0.7923076923076923,
}
def test_invalid_performance_score_column(self) -> None:
self.transaction_data["measurements"] = {
"score.total": {"value": 0.0},
}
self.store_event(self.transaction_data, self.project.id)
query = {
"field": [
"performance_score(measurements.score.fp)",
]
}
response = self.do_request(query)
assert response.status_code == 400, response.content
def test_all_events_fields(self) -> None:
user_data = {
"id": self.user.id,
"username": "user",
"email": "hellboy@bar.com",
"ip_address": "127.0.0.1",
}
replay_id = uuid.uuid4().hex
event = self.store_event(
data={
"timestamp": self.ten_mins_ago_iso,
"fingerprint": ["group1"],
"contexts": {
"trace": {
"trace_id": str(uuid.uuid4().hex),
"span_id": "933e5c9a8e464da9",
"type": "trace",
},
"replay": {"replay_id": replay_id},
},
"tags": {"device": "Mac"},
"user": user_data,
},
project_id=self.project.id,
)
query = {
"field": [
"id",
"transaction",
"title",
"release",
"environment",
"user.display",
"device",
"os",
"replayId",
"timestamp",
],
"statsPeriod": "2d",
"query": "is:unresolved",
"dataset": "errors",
"sort": "-title",
}
response = self.do_request(query)
assert response.status_code == 200, response.content
data = response.data["data"][0]
assert data == {
"id": event.event_id,
"transaction": "",
"project.name": event.project.name.lower(),
"title": event.group.title,
"release": event.release,
"environment": None,
"user.display": user_data["email"],
"device": "Mac",
"replayId": replay_id,
"os": "",
"timestamp": event.datetime.replace(microsecond=0).isoformat(),
}
def test_opportunity_score(self) -> None:
self.transaction_data["measurements"] = {
"score.lcp": {"value": 0.03},
"score.weight.lcp": {"value": 0.3},
"score.fcp": {"value": 0.4},
"score.weight.fcp": {"value": 0.7},
"score.total": {"value": 0.43},
}
self.store_event(self.transaction_data, self.project.id)
self.transaction_data["measurements"] = {
"score.lcp": {"value": 1.0},
"score.weight.lcp": {"value": 1.0},
"score.total": {"value": 1.0},
}
self.store_event(self.transaction_data, self.project.id)
self.transaction_data["measurements"] = {
"score.total": {"value": 0.0},
}
self.store_event(self.transaction_data, self.project.id)
query = {
"field": [
"opportunity_score(measurements.score.lcp)",
"opportunity_score(measurements.score.total)",
]
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0] == {
"opportunity_score(measurements.score.lcp)": 0.27,
"opportunity_score(measurements.score.total)": 1.57,
}
def test_count_scores(self) -> None:
self.transaction_data["measurements"] = {
"score.lcp": {"value": 0.03},
"score.total": {"value": 0.43},
}
self.store_event(self.transaction_data, self.project.id)
self.transaction_data["measurements"] = {
"score.total": {"value": 1.0},
}
self.store_event(self.transaction_data, self.project.id)
self.transaction_data["measurements"] = {
"score.total": {"value": 0.0},
}
self.store_event(self.transaction_data, self.project.id)
query = {
"field": [
"count_scores(measurements.score.lcp)",
"count_scores(measurements.score.total)",
]
}
response = self.do_request(query)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
assert response.data["data"][0] == {
"count_scores(measurements.score.lcp)": 1,
"count_scores(measurements.score.total)": 3,
}
def test_remapping(self) -> None:
self.store_event(self.transaction_data, self.project.id)
response = self.do_request(
{
"field": [
"transaction.duration",
"span.duration",
],
"query": "has:span.duration",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
meta = response.data["meta"]
assert len(data) == 1
assert data[0]["transaction.duration"] == 3000
assert data[0]["span.duration"] == 3000
assert meta["fields"]["span.duration"] == "duration"
assert meta["fields"]["transaction.duration"] == "duration"
assert meta["units"]["span.duration"] == "millisecond"
assert meta["units"]["transaction.duration"] == "millisecond"
| OrganizationEventsErrorsDatasetEndpointTest |
python | apache__airflow | providers/microsoft/winrm/src/airflow/providers/microsoft/winrm/hooks/winrm.py | {
"start": 1226,
"end": 14125
} | class ____(BaseHook):
"""
Hook for winrm remote execution using pywinrm.
:seealso: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
:param ssh_conn_id: connection id from airflow Connections from where
all the required parameters can be fetched like username and password,
though priority is given to the params passed during init.
:param endpoint: When not set, endpoint will be constructed like this:
'http://{remote_host}:{remote_port}/wsman'
:param remote_host: Remote host to connect to. Ignored if `endpoint` is set.
:param remote_port: Remote port to connect to. Ignored if `endpoint` is set.
:param transport: transport type, one of 'plaintext' (default), 'kerberos', 'ssl', 'ntlm', 'credssp'
:param username: username to connect to the remote_host
:param password: password of the username to connect to the remote_host
:param service: the service name, default is HTTP
:param keytab: the path to a keytab file if you are using one
:param ca_trust_path: Certification Authority trust path
:param cert_pem: client authentication certificate file path in PEM format
:param cert_key_pem: client authentication certificate key file path in PEM format
:param server_cert_validation: whether server certificate should be validated on
Python versions that support it; one of 'validate' (default), 'ignore'
:param kerberos_delegation: if True, TGT is sent to target server to
allow multiple hops
:param read_timeout_sec: maximum seconds to wait before an HTTP connect/read times out (default 30).
This value should be slightly higher than operation_timeout_sec,
as the server can block *at least* that long.
:param operation_timeout_sec: maximum allowed time in seconds for any single wsman
HTTP operation (default 20). Note that operation timeouts while receiving output
(the only wsman operation that should take any significant time,
and where these timeouts are expected) will be silently retried indefinitely.
:param kerberos_hostname_override: the hostname to use for the kerberos exchange
(defaults to the hostname in the endpoint URL)
:param message_encryption: Will encrypt the WinRM messages if set
and the transport auth supports message encryption. (Default 'auto')
:param credssp_disable_tlsv1_2: Whether to disable TLSv1.2 support and work with older
protocols like TLSv1.0, default is False
:param send_cbt: Will send the channel bindings over a HTTPS channel (Default: True)
"""
def __init__(
self,
ssh_conn_id: str | None = None,
endpoint: str | None = None,
remote_host: str | None = None,
remote_port: int = 5985,
transport: str = "plaintext",
username: str | None = None,
password: str | None = None,
service: str = "HTTP",
keytab: str | None = None,
ca_trust_path: str | None = None,
cert_pem: str | None = None,
cert_key_pem: str | None = None,
server_cert_validation: str = "validate",
kerberos_delegation: bool = False,
read_timeout_sec: int = 30,
operation_timeout_sec: int = 20,
kerberos_hostname_override: str | None = None,
message_encryption: str | None = "auto",
credssp_disable_tlsv1_2: bool = False,
send_cbt: bool = True,
) -> None:
super().__init__()
self.ssh_conn_id = ssh_conn_id
self.endpoint = endpoint
self.remote_host = remote_host
self.remote_port = remote_port
self.transport = transport
self.username = username
self.password = password
self.service = service
self.keytab = keytab
self.ca_trust_path = ca_trust_path
self.cert_pem = cert_pem
self.cert_key_pem = cert_key_pem
self.server_cert_validation = server_cert_validation
self.kerberos_delegation = kerberos_delegation
self.read_timeout_sec = read_timeout_sec
self.operation_timeout_sec = operation_timeout_sec
self.kerberos_hostname_override = kerberos_hostname_override
self.message_encryption = message_encryption
self.credssp_disable_tlsv1_2 = credssp_disable_tlsv1_2
self.send_cbt = send_cbt
self.winrm_protocol = None
def get_conn(self):
self.log.debug("Creating WinRM client for conn_id: %s", self.ssh_conn_id)
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if self.username is None:
self.username = conn.login
if self.password is None:
self.password = conn.password
if self.remote_host is None:
self.remote_host = conn.host
if conn.extra is not None:
extra_options = conn.extra_dejson
if "endpoint" in extra_options:
self.endpoint = str(extra_options["endpoint"])
if "remote_port" in extra_options:
self.remote_port = int(extra_options["remote_port"])
if "transport" in extra_options:
self.transport = str(extra_options["transport"])
if "service" in extra_options:
self.service = str(extra_options["service"])
if "keytab" in extra_options:
self.keytab = str(extra_options["keytab"])
if "ca_trust_path" in extra_options:
self.ca_trust_path = str(extra_options["ca_trust_path"])
if "cert_pem" in extra_options:
self.cert_pem = str(extra_options["cert_pem"])
if "cert_key_pem" in extra_options:
self.cert_key_pem = str(extra_options["cert_key_pem"])
if "server_cert_validation" in extra_options:
self.server_cert_validation = str(extra_options["server_cert_validation"])
if "kerberos_delegation" in extra_options:
self.kerberos_delegation = str(extra_options["kerberos_delegation"]).lower() == "true"
if "read_timeout_sec" in extra_options:
self.read_timeout_sec = int(extra_options["read_timeout_sec"])
if "operation_timeout_sec" in extra_options:
self.operation_timeout_sec = int(extra_options["operation_timeout_sec"])
if "kerberos_hostname_override" in extra_options:
self.kerberos_hostname_override = str(extra_options["kerberos_hostname_override"])
if "message_encryption" in extra_options:
self.message_encryption = str(extra_options["message_encryption"])
if "credssp_disable_tlsv1_2" in extra_options:
self.credssp_disable_tlsv1_2 = (
str(extra_options["credssp_disable_tlsv1_2"]).lower() == "true"
)
if "send_cbt" in extra_options:
self.send_cbt = str(extra_options["send_cbt"]).lower() == "true"
if not self.remote_host:
raise AirflowException("Missing required param: remote_host")
# Auto detecting username values from system
if not self.username:
self.log.debug(
"username to WinRM to host: %s is not specified for connection id"
" %s. Using system's default provided by getpass.getuser()",
self.remote_host,
self.ssh_conn_id,
)
self.username = getuser()
# If endpoint is not set, then build a standard wsman endpoint from host and port.
if not self.endpoint:
self.endpoint = f"http://{self.remote_host}:{self.remote_port}/wsman"
try:
if self.password and self.password.strip():
self.winrm_protocol = Protocol(
endpoint=self.endpoint,
transport=self.transport,
username=self.username,
password=self.password,
service=self.service,
keytab=self.keytab,
ca_trust_path=self.ca_trust_path,
cert_pem=self.cert_pem,
cert_key_pem=self.cert_key_pem,
server_cert_validation=self.server_cert_validation,
kerberos_delegation=self.kerberos_delegation,
read_timeout_sec=self.read_timeout_sec,
operation_timeout_sec=self.operation_timeout_sec,
kerberos_hostname_override=self.kerberos_hostname_override,
message_encryption=self.message_encryption,
credssp_disable_tlsv1_2=self.credssp_disable_tlsv1_2,
send_cbt=self.send_cbt,
)
except Exception as error:
error_msg = f"Error creating connection to host: {self.remote_host}, error: {error}"
self.log.error(error_msg)
raise AirflowException(error_msg)
if not hasattr(self.winrm_protocol, "get_command_output_raw"):
# since pywinrm>=0.5 get_command_output_raw replace _raw_get_command_output
self.winrm_protocol.get_command_output_raw = self.winrm_protocol._raw_get_command_output
return self.winrm_protocol
def run(
    self,
    command: str,
    ps_path: str | None = None,
    output_encoding: str = "utf-8",
    return_output: bool = True,
    working_directory: str | None = None,
) -> tuple[int, list[bytes], list[bytes]]:
    """
    Run a command on the remote host over an open WinRM shell.

    :param command: command to execute on remote host.
    :param ps_path: path to powershell, `powershell` for v5.1- and `pwsh` for v6+.
        If specified, it will execute the command as powershell script.
    :param output_encoding: the encoding used to decode stdout and stderr.
    :param return_output: Whether to accumulate and return the stdout or not.
    :param working_directory: specify working directory.
    :return: returns a tuple containing return_code, stdout and stderr in order;
        stdout/stderr are lists of raw byte chunks as received from the remote
        shell (stdout buffering is skipped when ``return_output`` is False).
    """
    winrm_client = self.get_conn()
    self.log.info("Establishing WinRM connection to host: %s", self.remote_host)
    try:
        shell_id = winrm_client.open_shell(working_directory=working_directory)
    except Exception as error:
        error_msg = f"Error connecting to host: {self.remote_host}, error: {error}"
        self.log.error(error_msg)
        raise AirflowException(error_msg)
    try:
        if ps_path is not None:
            self.log.info("Running command as powershell script: '%s'...", command)
            # PowerShell's -encodedcommand switch expects the script base64-encoded
            # from its UTF-16LE representation.
            encoded_ps = b64encode(command.encode("utf_16_le")).decode("ascii")
            command_id = winrm_client.run_command(shell_id, f"{ps_path} -encodedcommand {encoded_ps}")
        else:
            self.log.info("Running command: '%s'...", command)
            command_id = winrm_client.run_command(shell_id, command)

        # See: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
        stdout_buffer = []
        stderr_buffer = []
        command_done = False
        while not command_done:
            # this is an expected error when waiting for a long-running process,
            # just silently retry; on timeout the rest of this block is skipped
            # (stdout/stderr/return_code stay unbound until a successful poll).
            with suppress(WinRMOperationTimeoutError):
                (
                    stdout,
                    stderr,
                    return_code,
                    command_done,
                ) = winrm_client.get_command_output_raw(shell_id, command_id)

                # Only buffer stdout if we need to so that we minimize memory usage.
                if return_output:
                    stdout_buffer.append(stdout)
                stderr_buffer.append(stderr)

                # Stream decoded output into the task log as it arrives.
                for line in stdout.decode(output_encoding).splitlines():
                    self.log.info(line)
                for line in stderr.decode(output_encoding).splitlines():
                    self.log.warning(line)

        winrm_client.cleanup_command(shell_id, command_id)
        return return_code, stdout_buffer, stderr_buffer
    except Exception as e:
        raise AirflowException(f"WinRM operator error: {e}")
    finally:
        # Always release the remote shell, even when command execution failed.
        winrm_client.close_shell(shell_id)
def test_connection(self):
    """Verify the WinRM connection by running a trivial remote command (``cd``).

    :return: a ``(success, message)`` tuple: ``(True, "Connection successful.")``
        when the command exits with code 0, otherwise ``(False, <error text>)``.
    """
    try:
        return_code, _, stderr = self.run("cd")
        if return_code != 0:
            # Decode the buffered stderr chunks into readable text; raising the
            # raw list of bytes would surface an unreadable repr like "[b'...']".
            raise RuntimeError(b"".join(stderr).decode(errors="replace").strip())
        return True, "Connection successful."
    except Exception as e:
        return False, str(e)
| WinRMHook |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 70576,
"end": 70733
} | class ____(BaseModel, extra="forbid"):
conditions: List["Condition"] = Field(..., description="")
min_count: int = Field(..., description="")
| MinShould |
python | django__django | tests/admin_views/admin.py | {
"start": 27645,
"end": 27778
} | class ____(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
| EmptyModelHiddenAdmin |
python | sympy__sympy | sympy/categories/baseclasses.py | {
"start": 3856,
"end": 4610
} | class ____(Morphism):
"""
Represents an identity morphism.
Explanation
===========
An identity morphism is a morphism with equal domain and codomain,
which acts as an identity with respect to composition.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, IdentityMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> id_A = IdentityMorphism(A)
>>> id_B = IdentityMorphism(B)
>>> f * id_A == f
True
>>> id_B * f == f
True
See Also
========
Morphism
"""
def __new__(cls, domain):
return Basic.__new__(cls, domain)
@property
def codomain(self):
return self.domain
| IdentityMorphism |
python | kubernetes-client__python | kubernetes/client/models/v1_priority_class.py | {
"start": 383,
"end": 10904
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'description': 'str',
'global_default': 'bool',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'preemption_policy': 'str',
'value': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'description': 'description',
'global_default': 'globalDefault',
'kind': 'kind',
'metadata': 'metadata',
'preemption_policy': 'preemptionPolicy',
'value': 'value'
}
def __init__(self, api_version=None, description=None, global_default=None, kind=None, metadata=None, preemption_policy=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._description = None
self._global_default = None
self._kind = None
self._metadata = None
self._preemption_policy = None
self._value = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if description is not None:
self.description = description
if global_default is not None:
self.global_default = global_default
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if preemption_policy is not None:
self.preemption_policy = preemption_policy
self.value = value
@property
def api_version(self):
"""Gets the api_version of this V1PriorityClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PriorityClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PriorityClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def description(self):
"""Gets the description of this V1PriorityClass. # noqa: E501
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:return: The description of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1PriorityClass.
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:param description: The description of this V1PriorityClass. # noqa: E501
:type: str
"""
self._description = description
@property
def global_default(self):
"""Gets the global_default of this V1PriorityClass. # noqa: E501
globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
:return: The global_default of this V1PriorityClass. # noqa: E501
:rtype: bool
"""
return self._global_default
@global_default.setter
def global_default(self, global_default):
"""Sets the global_default of this V1PriorityClass.
globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
:param global_default: The global_default of this V1PriorityClass. # noqa: E501
:type: bool
"""
self._global_default = global_default
@property
def kind(self):
"""Gets the kind of this V1PriorityClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PriorityClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PriorityClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PriorityClass. # noqa: E501
:return: The metadata of this V1PriorityClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PriorityClass.
:param metadata: The metadata of this V1PriorityClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def preemption_policy(self):
"""Gets the preemption_policy of this V1PriorityClass. # noqa: E501
preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
:return: The preemption_policy of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._preemption_policy
@preemption_policy.setter
def preemption_policy(self, preemption_policy):
"""Sets the preemption_policy of this V1PriorityClass.
preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
:param preemption_policy: The preemption_policy of this V1PriorityClass. # noqa: E501
:type: str
"""
self._preemption_policy = preemption_policy
@property
def value(self):
"""Gets the value of this V1PriorityClass. # noqa: E501
value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:return: The value of this V1PriorityClass. # noqa: E501
:rtype: int
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1PriorityClass.
value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:param value: The value of this V1PriorityClass. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PriorityClass):
return True
return self.to_dict() != other.to_dict()
| V1PriorityClass |
python | keras-team__keras | keras/src/ops/math.py | {
"start": 13478,
"end": 15812
} | class ____(Operation):
def compute_output_spec(self, x):
axes = (-2, -1)
if not isinstance(x, (tuple, list)) or len(x) != 2:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and "
f"imaginary. Received: x={x}"
)
real, imag = x
# Both real and imaginary parts should have the same shape.
if real.shape != imag.shape:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and "
"imaginary. Both the real and imaginary parts should have the "
f"same shape. Received: x[0].shape = {real.shape}, "
f"x[1].shape = {imag.shape}"
)
# We are calculating 2D FFT. Hence, rank >= 2.
if len(real.shape) < 2:
raise ValueError(
f"Input should have rank >= 2. "
f"Received: input.shape = {real.shape}"
)
# The axes along which we are calculating FFT should be fully-defined.
m = real.shape[axes[0]]
n = real.shape[axes[1]]
if m is None or n is None:
raise ValueError(
f"Input should have its {axes} axes fully-defined. "
f"Received: input.shape = {real.shape}"
)
return (
KerasTensor(shape=real.shape, dtype=real.dtype),
KerasTensor(shape=imag.shape, dtype=imag.dtype),
)
def call(self, x):
return backend.math.fft2(x)
@keras_export("keras.ops.fft2")
def fft2(x):
"""Computes the 2D Fast Fourier Transform along the last two axes of input.
Args:
x: Tuple of the real and imaginary parts of the input tensor. Both
tensors in the tuple should be of floating type.
Returns:
A tuple containing two tensors - the real and imaginary parts of the
output.
Example:
>>> x = (
... keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]),
... keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]),
... )
>>> fft2(x)
(array([[ 6., 0.],
[ 0., -2.]], dtype=float32), array([[ 2., 0.],
[ 0., -2.]], dtype=float32))
"""
if any_symbolic_tensors(x):
return FFT2().symbolic_call(x)
return backend.math.fft2(x)
| FFT2 |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/basic.py | {
"start": 8206,
"end": 8500
} | class ____(DefaultComponent):
type = "html"
def __init__(self, data=None):
super().__init__(title=None, subtitle=None)
self._data = data
def render(self):
datadict = super().render()
datadict["data"] = self._data
return datadict
| HTMLComponent |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 47672,
"end": 51053
} | class ____(BiffRecord):
"""
Record WINDOW2, BIFF8:
Offset Size Contents
0 2 Option flags (see below)
2 2 Index to first visible row
4 2 Index to first visible column
6 2 Colour index of grid line colour. Note that in BIFF2-BIFF7 an RGB colour is
written instead.
8 2 Not used
10 2 Cached magnification factor in page break preview (in percent); 0 = Default (60%)
12 2 Cached magnification factor in normal view (in percent); 0 = Default (100%)
14 4 Not used
In BIFF8 this record stores used magnification factors for page break
preview and normal view. These values are used to restore the
magnification, when the view is changed. The real magnification of the
currently active view is stored in the SCL record. The type of the
active view is stored in the option flags field (see below).
0 0001H 0 = Show formula results 1 = Show formulas
1 0002H 0 = Do not show grid lines 1 = Show grid lines
2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
4 0010H 0 = Show zero values as empty cells 1 = Show zero values
5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
6 0040H 0 = Columns from left to right 1 = Columns from right to left
7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
The freeze flag specifies, if a following PANE record describes unfrozen or frozen panes.
*** This class appends the optional SCL record ***
Record SCL, BIFF4-BIFF8:
This record stores the magnification of the active view of the current worksheet.
In BIFF8 this can be either the normal view or the page break preview.
This is determined in the WINDOW2 record. The SCL record is part of the
Sheet View Settings Block.
Offset Size Contents
0 2 Numerator of the view magnification fraction (num)
2 2 Denumerator [denominator] of the view magnification fraction (den)
The magnification is stored as reduced fraction. The magnification results from num/den.
SJM note: Excel expresses (e.g.) 25% in reduced form i.e. 1/4. Reason unknown. This code
writes 25/100, and Excel is happy with that.
"""
_REC_ID = 0x023E
def __init__(self, options, first_visible_row, first_visible_col,
grid_colour, preview_magn, normal_magn, scl_magn):
self._rec_data = pack('<7HL', options,
first_visible_row, first_visible_col,
grid_colour,
0x00,
preview_magn, normal_magn,
0x00)
if scl_magn is not None:
self._scl_rec = pack('<4H', 0x00A0, 4, scl_magn, 100)
else:
self._scl_rec = b''
def get(self):
return self.get_rec_header() + self._rec_data + self._scl_rec
| Window2Record |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_list_builds_models.py | {
"start": 169,
"end": 343
} | class ____(BaseModel):
next: int | None
prev: int | None
has_next: bool
has_prev: bool
page: int
per_page: int
total_count: int | str
| PaginationInfo |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | {
"start": 29669,
"end": 33515
} | class ____(Ernie4_5_MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Ernie4_5_MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.moe_num_experts
self.num_experts_per_tok = config.moe_k
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
__all__ = ["Ernie4_5_MoeForCausalLM", "Ernie4_5_MoeModel", "Ernie4_5_MoePreTrainedModel"]
| Ernie4_5_MoeForCausalLM |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py | {
"start": 922,
"end": 1057
} | class ____(enum.Enum):
A = cast(SomeType, ...)
B = cast(SomeType, ...) # PIE796
C = cast(SomeType, ...) # PIE796
| FakeEnum11 |
python | langchain-ai__langchain | libs/partners/prompty/langchain_prompty/parsers.py | {
"start": 642,
"end": 4732
} | class ____(Invoker):
"""Parse a chat prompt into a list of messages."""
def __init__(self, prompty: Prompty) -> None:
self.prompty = prompty
self.roles = RoleMap.ROLES
self.path = self.prompty.file.parent
def inline_image(self, image_item: str) -> str:
# pass through if it's a url or base64 encoded
if image_item.startswith("http") or image_item.startswith("data"):
return image_item
# otherwise, it's a local file - need to base64 encode it
else:
image_path = self.path / image_item
with open(image_path, "rb") as f:
base64_image = base64.b64encode(f.read()).decode("utf-8")
if image_path.suffix == ".png":
return f"data:image/png;base64,{base64_image}"
elif image_path.suffix == ".jpg":
return f"data:image/jpeg;base64,{base64_image}"
elif image_path.suffix == ".jpeg":
return f"data:image/jpeg;base64,{base64_image}"
else:
raise ValueError(
f"Invalid image format {image_path.suffix} - currently only .png "
"and .jpg / .jpeg are supported."
)
def parse_content(self, content: str) -> str | list:
"""for parsing inline images"""
# regular expression to parse markdown images
image = r"(?P<alt>!\[[^\]]*\])\((?P<filename>.*?)(?=\"|\))\)"
matches = re.findall(image, content, flags=re.MULTILINE)
if len(matches) > 0:
content_items = []
content_chunks = re.split(image, content, flags=re.MULTILINE)
current_chunk = 0
for i in range(len(content_chunks)):
# image entry
if (
current_chunk < len(matches)
and content_chunks[i] == matches[current_chunk][0]
):
content_items.append(
{
"type": "image_url",
"image_url": {
"url": self.inline_image(
matches[current_chunk][1].split(" ")[0].strip()
)
},
}
)
# second part of image entry
elif (
current_chunk < len(matches)
and content_chunks[i] == matches[current_chunk][1]
):
current_chunk += 1
# text entry
else:
if len(content_chunks[i].strip()) > 0:
content_items.append(
{"type": "text", "text": content_chunks[i].strip()}
)
return content_items
else:
return content
def invoke(self, data: BaseModel) -> BaseModel:
if not isinstance(data, SimpleModel):
raise ValueError("data must be an instance of SimpleModel")
messages = []
separator = r"(?i)^\s*#?\s*(" + "|".join(self.roles) + r")\s*:\s*\n"
# get valid chunks - remove empty items
chunks = [
item
for item in re.split(separator, data.item, flags=re.MULTILINE)
if len(item.strip()) > 0
]
# if no starter role, then inject system role
if chunks[0].strip().lower() not in self.roles:
chunks.insert(0, "system")
# if last chunk is role entry, then remove (no content?)
if chunks[-1].strip().lower() in self.roles:
chunks.pop()
if len(chunks) % 2 != 0:
raise ValueError("Invalid prompt format")
# create messages
for i in range(0, len(chunks), 2):
role = chunks[i].strip().lower()
content = chunks[i + 1].strip()
messages.append({"role": role, "content": self.parse_content(content)})
return SimpleModel[list](item=messages)
| PromptyChatParser |
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 97647,
"end": 100748
} | class ____:
def test_basic(self, temp_dir, isolated_data_dir, platform, global_application):
for i in range(3):
project_file = temp_dir / f"foo{i}" / "pyproject.toml"
project_file.parent.mkdir()
project_file.write_text(
f"""\
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "foo{i}"
version = "0.0.1"
dependencies = ["pkg-{i}"]
"""
)
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "f*"}]}}}}},
}
project = Project(temp_dir, config=config)
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.workspace.get_dependencies() == ["pkg-0", "pkg-1", "pkg-2"]
def test_features(self, temp_dir, isolated_data_dir, platform, global_application):
for i in range(3):
project_file = temp_dir / f"foo{i}" / "pyproject.toml"
project_file.parent.mkdir()
project_file.write_text(
f"""\
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "foo{i}"
version = "0.0.1"
dependencies = ["pkg-{i}"]
[project.optional-dependencies]
feature1 = ["pkg-feature-1{i}"]
feature2 = ["pkg-feature-2{i}"]
feature3 = ["pkg-feature-3{i}"]
"""
)
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {
"hatch": {
"envs": {
"default": {
"workspace": {
"members": [
{"path": "foo0", "features": ["feature1"]},
{"path": "foo1", "features": ["feature1", "feature2"]},
{"path": "foo2", "features": ["feature1", "feature2", "feature3"]},
],
},
},
},
},
},
}
project = Project(temp_dir, config=config)
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.workspace.get_dependencies() == [
"pkg-0",
"pkg-feature-10",
"pkg-1",
"pkg-feature-11",
"pkg-feature-21",
"pkg-2",
"pkg-feature-12",
"pkg-feature-22",
"pkg-feature-32",
]
| TestWorkspaceDependencies |
python | wandb__wandb | tests/system_tests/test_notebooks/test_jupyter_server/conftest.py | {
"start": 4931,
"end": 9062
} | class ____:
"""A client for executing notebooks against a Jupyter server.
The client is tied to a specific session and kernel
created by the Jupyter server.
"""
def __init__(self, session_id: str, kernel_id: str):
self.session_id = session_id
self.kernel_id = kernel_id
self.connection_file = self._get_connection_file(kernel_id)
self.nb_client = self._create_nb_client(self.connection_file)
def _get_connection_file(self, kernel_id: str) -> str:
"""Find the connection file for a kernel in the default runtime directory."""
max_retries = 30
default_runtime_dir = Path(jupyter_core.paths.jupyter_runtime_dir())
for _ in range(max_retries):
matching = list(default_runtime_dir.glob(f"kernel-{kernel_id}*.json"))
if matching:
return str(matching[0])
time.sleep(0.5)
raise AssertionError(
f"No connection file found for kernel {kernel_id} after {max_retries * 0.5}s"
)
def _create_nb_client(self, connection_file: str) -> BlockingKernelClient:
with open(connection_file) as f:
connection_info = json.load(f)
client = BlockingKernelClient()
client.load_connection_info(connection_info)
client.start_channels()
client.wait_for_ready(timeout=10)
return client
def execute_notebook(self, notebook: nbformat.NotebookNode):
"""Execute a notebook in the notebook."""
executed_notebook = notebook.copy()
for cell in executed_notebook.cells:
self.execute_cell(cell)
return executed_notebook
def execute_cell(self, cell):
"""Execute a cell in the notebook."""
return self.collect_outputs(cell, self.nb_client.execute(cell.source))
def collect_outputs(self, cell, msg_id: str):
"""Collect outputs from a cell execution."""
while True:
msg = self.nb_client.get_iopub_msg()
if msg["parent_header"].get("msg_id") != msg_id:
continue
msg_type = msg["msg_type"]
content = msg["content"]
if msg_type == "stream":
output = nbformat.v4.new_output(
output_type="stream",
name=content["name"],
text=content["text"],
)
cell.outputs.append(output)
elif msg_type == "error":
output = nbformat.v4.new_output(
output_type="error",
ename=content["ename"],
evalue=content["evalue"],
traceback=content["traceback"],
)
cell.outputs.append(output)
elif msg_type == "status":
if content["execution_state"] == "idle":
break
elif msg_type == "display_data":
output = nbformat.v4.new_output(
output_type="display_data",
data=content["data"],
metadata=content["metadata"],
)
cell.outputs.append(output)
elif msg_type == "update_display_data":
output = nbformat.v4.new_output(
output_type="display_data",
data=content["data"],
metadata=content["metadata"],
)
cell.outputs.append(output)
@pytest.fixture(scope="session")
def jupyter_server() -> Generator[JupyterServerManager, None, None]:
with JupyterServerManager(server_dir=Path(tempfile.mkdtemp())) as jupyter_server:
yield jupyter_server
@pytest.fixture()
def notebook_client(
jupyter_server: JupyterServerManager,
) -> Generator[Callable[[str], NotebookClient], None, None]:
def _new_notebook_client(notebook_path: str) -> NotebookClient:
session_id, kernel_id = jupyter_server.create_session(
notebook_path=notebook_path
)
return NotebookClient(session_id, kernel_id)
yield _new_notebook_client
| NotebookClient |
python | mwaskom__seaborn | seaborn/external/docscrape.py | {
"start": 1760,
"end": 3579
} | class ____:
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
| Reader |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/oscon/schedule_v3.py | {
"start": 541,
"end": 888
} | class ____:
__index = None
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return f'<{self.__class__.__name__} serial={self.serial!r}>'
@staticmethod
def fetch(key):
if Record.__index is None:
Record.__index = load()
return Record.__index[key]
| Record |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 17896,
"end": 18150
} | class ____:
"""
A debug information enumeration value that should appear bare in
the emitted metadata.
Use this to wrap known constants, e.g. the DW_* enumerations.
"""
def __init__(self, value):
self.value = value
| DIToken |
python | sympy__sympy | sympy/functions/elementary/trigonometric.py | {
"start": 108900,
"end": 115946
} | class ____(InverseTrigonometricFunction):
r"""
The inverse cosecant function.
Returns the arc cosecant of x (measured in radians).
Explanation
===========
``acsc(x)`` will evaluate automatically in the cases
$x \in \{\infty, -\infty, 0, 1, -1\}$` and for some instances when the
result is a rational multiple of $\pi$ (see the ``eval`` class method).
Examples
========
>>> from sympy import acsc, oo
>>> acsc(1)
pi/2
>>> acsc(-1)
-pi/2
>>> acsc(oo)
0
>>> acsc(-oo) == acsc(oo)
True
>>> acsc(0)
zoo
See Also
========
sympy.functions.elementary.trigonometric.sin
sympy.functions.elementary.trigonometric.csc
sympy.functions.elementary.trigonometric.cos
sympy.functions.elementary.trigonometric.sec
sympy.functions.elementary.trigonometric.tan
sympy.functions.elementary.trigonometric.cot
sympy.functions.elementary.trigonometric.asin
sympy.functions.elementary.trigonometric.acos
sympy.functions.elementary.trigonometric.asec
sympy.functions.elementary.trigonometric.atan
sympy.functions.elementary.trigonometric.acot
sympy.functions.elementary.trigonometric.atan2
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse_trigonometric_functions
.. [2] https://dlmf.nist.gov/4.23
.. [3] https://functions.wolfram.com/ElementaryFunctions/ArcCsc
"""
@classmethod
def eval(cls, arg):
if arg.is_zero:
return S.ComplexInfinity
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.One:
return pi/2
elif arg is S.NegativeOne:
return -pi/2
if arg in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
return S.Zero
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_infinite:
return S.Zero
if arg.is_number:
acsc_table = cls._acsc_table()
if arg in acsc_table:
return acsc_table[arg]
if isinstance(arg, csc):
ang = arg.args[0]
if ang.is_comparable:
ang %= 2*pi # restrict to [0,2*pi)
if ang > pi: # restrict to (-pi,pi]
ang = pi - ang
# restrict to [-pi/2,pi/2]
if ang > pi/2:
ang = pi - ang
if ang < -pi/2:
ang = -pi - ang
return ang
if isinstance(arg, sec): # asec(x) + acsc(x) = pi/2
ang = arg.args[0]
if ang.is_comparable:
return pi/2 - asec(arg)
def fdiff(self, argindex=1):
if argindex == 1:
return -1/(self.args[0]**2*sqrt(1 - 1/self.args[0]**2))
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return csc
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return pi/2 - S.ImaginaryUnit*log(2) + S.ImaginaryUnit*log(x)
elif n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2 and n > 2:
p = previous_terms[-2]
return p * ((n - 1)*(n-2)) * x**2/(4 * (n//2)**2)
else:
k = n // 2
R = RisingFactorial(S.Half, k) * n
F = factorial(k) * n // 2 * n // 2
return S.ImaginaryUnit * R / F * x**n / 4
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
if x0 is S.NaN:
return self.func(arg.as_leading_term(x))
# Handling branch points
if x0 in (-S.One, S.One, S.Zero):
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir).expand()
if x0 is S.ComplexInfinity:
return (1/arg).as_leading_term(x)
# Handling points lying on branch cuts (-1, 1)
if x0.is_real and (1 - x0**2).is_positive:
ndir = arg.dir(x, cdir if cdir else 1)
if im(ndir).is_negative:
if x0.is_positive:
return pi - self.func(x0)
elif im(ndir).is_positive:
if x0.is_negative:
return -pi - self.func(x0)
else:
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir).expand()
return self.func(x0)
def _eval_nseries(self, x, n, logx, cdir=0): # acsc
from sympy.series.order import O
arg0 = self.args[0].subs(x, 0)
# Handling branch points
if arg0 is S.One:
t = Dummy('t', positive=True)
ser = acsc(S.One + t**2).rewrite(log).nseries(t, 0, 2*n)
arg1 = S.NegativeOne + self.args[0]
f = arg1.as_leading_term(x)
g = (arg1 - f)/ f
res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
res = (res1.removeO()*sqrt(f)).expand()
return ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)
if arg0 is S.NegativeOne:
t = Dummy('t', positive=True)
ser = acsc(S.NegativeOne - t**2).rewrite(log).nseries(t, 0, 2*n)
arg1 = S.NegativeOne - self.args[0]
f = arg1.as_leading_term(x)
g = (arg1 - f)/ f
res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
res = (res1.removeO()*sqrt(f)).expand()
return ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)
res = super()._eval_nseries(x, n=n, logx=logx)
if arg0 is S.ComplexInfinity:
return res
# Handling points lying on branch cuts (-1, 1)
if arg0.is_real and (1 - arg0**2).is_positive:
ndir = self.args[0].dir(x, cdir if cdir else 1)
if im(ndir).is_negative:
if arg0.is_positive:
return pi - res
elif im(ndir).is_positive:
if arg0.is_negative:
return -pi - res
else:
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
return res
def _eval_rewrite_as_log(self, arg, **kwargs):
return -S.ImaginaryUnit*log(S.ImaginaryUnit/arg + sqrt(1 - 1/arg**2))
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _eval_rewrite_as_asin(self, arg, **kwargs):
return asin(1/arg)
def _eval_rewrite_as_acos(self, arg, **kwargs):
return pi/2 - acos(1/arg)
def _eval_rewrite_as_atan(self, x, **kwargs):
return sqrt(x**2)/x*(pi/2 - atan(sqrt(x**2 - 1)))
def _eval_rewrite_as_acot(self, arg, **kwargs):
return sqrt(arg**2)/arg*(pi/2 - acot(1/sqrt(arg**2 - 1)))
def _eval_rewrite_as_asec(self, arg, **kwargs):
return pi/2 - asec(arg)
| acsc |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cache_utils.py | {
"start": 2201,
"end": 4412
} | class ____(Generic[R]):
"""Function cache interface. Caches persist across script runs."""
def __init__(self) -> None:
self._value_locks: dict[str, threading.Lock] = defaultdict(threading.Lock)
self._value_locks_lock = threading.Lock()
@abstractmethod
def read_result(self, value_key: str) -> CachedResult[R]:
"""Read a value and associated messages from the cache.
Raises
------
CacheKeyNotFoundError
Raised if value_key is not in the cache.
"""
raise NotImplementedError
@abstractmethod
def write_result(self, value_key: str, value: R, messages: list[MsgData]) -> None:
"""Write a value and associated messages to the cache, overwriting any existing
result that uses the value_key.
"""
# We *could* `del self._value_locks[value_key]` here, since nobody will be taking
# a compute_value_lock for this value_key after the result is written.
raise NotImplementedError
def compute_value_lock(self, value_key: str) -> threading.Lock:
"""Return the lock that should be held while computing a new cached value.
In a popular app with a cache that hasn't been pre-warmed, many sessions may try
to access a not-yet-cached value simultaneously. We use a lock to ensure that
only one of those sessions computes the value, and the others block until
the value is computed.
"""
with self._value_locks_lock:
return self._value_locks[value_key]
def clear(self, key: str | None = None) -> None:
"""Clear values from this cache.
If no argument is passed, all items are cleared from the cache.
A key can be passed to clear that key from the cache only.
"""
with self._value_locks_lock:
if not key:
self._value_locks.clear()
elif key in self._value_locks:
del self._value_locks[key]
self._clear(key=key)
@abstractmethod
def _clear(self, key: str | None = None) -> None:
"""Subclasses must implement this to perform cache-clearing logic."""
raise NotImplementedError
| Cache |
python | Pylons__pyramid | tests/test_scripts/test_pshell.py | {
"start": 13126,
"end": 13604
} | class ____(unittest.TestCase):
def _callFUT(self, env, help, interact):
from pyramid.scripts.pshell import python_shell_runner
return python_shell_runner(env, help, interact=interact)
def test_it(self):
interact = dummy.DummyInteractor()
self._callFUT({'foo': 'bar'}, 'a help message', interact)
self.assertEqual(interact.local, {'foo': 'bar'})
self.assertTrue('a help message' in interact.banner)
| Test_python_shell_runner |
python | davidhalter__jedi | test/completion/functions.py | {
"start": 3715,
"end": 7022
} | class ____():
@memoize
def x(self, a, b=1):
return a
#? int()
Something().x(1)
# -----------------
# ** kwargs
# -----------------
def kwargs_func(**kwargs):
#? ['keys']
kwargs.keys
#? dict()
return kwargs
exe = kwargs_func(a=3,b=4.0)
#? dict()
exe
#? int()
exe['a']
#? float()
exe['b']
#? int() float()
exe['c']
a = 'a'
exe2 = kwargs_func(**{a:3,
'b':4.0})
#? int()
exe2['a']
#? float()
exe2['b']
#? int() float()
exe2['c']
exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]})
# Should resolve to the same as 2 but jedi is not smart enough yet
# Here to make sure it doesn't result in crash though
#?
exe3['a']
#?
exe3['b']
#?
exe3['c']
# -----------------
# *args / ** kwargs
# -----------------
def func_without_call(*args, **kwargs):
#? tuple()
args
#? dict()
kwargs
def fu(a=1, b="", *args, **kwargs):
return a, b, args, kwargs
exe = fu(list, 1, "", c=set, d="")
#? list
exe[0]
#? int()
exe[1]
#? tuple()
exe[2]
#? str()
exe[2][0]
#? dict()
exe[3]
#? set
exe[3]['c']
def kwargs_iteration(**kwargs):
return kwargs
for x in kwargs_iteration(d=3):
#? float()
{'d': 1.0, 'c': '1'}[x]
# -----------------
# nested *args
# -----------------
def function_args(a, b, c):
return b
def nested_args(*args):
return function_args(*args)
def nested_args2(*args, **kwargs):
return nested_args(*args)
#? int()
nested_args('', 1, 1.0, list)
#? []
nested_args('').
#? int()
nested_args2('', 1, 1.0)
#? []
nested_args2('').
# -----------------
# nested **kwargs
# -----------------
def nested_kw(**kwargs1):
return function_args(**kwargs1)
def nested_kw2(**kwargs2):
return nested_kw(**kwargs2)
# invalid command, doesn't need to return anything
#?
nested_kw(b=1, c=1.0, list)
#? int()
nested_kw(b=1)
# invalid command, doesn't need to return anything
#?
nested_kw(d=1.0, b=1, list)
#? int()
nested_kw(a=3.0, b=1)
#? int()
nested_kw(b=1, a=r"")
#? []
nested_kw(1, '').
#? []
nested_kw(a='').
#? int()
nested_kw2(b=1)
#? int()
nested_kw2(b=1, c=1.0)
#? int()
nested_kw2(c=1.0, b=1)
#? []
nested_kw2('').
#? []
nested_kw2(a='').
#? []
nested_kw2('', b=1).
# -----------------
# nested *args/**kwargs
# -----------------
def nested_both(*args, **kwargs):
return function_args(*args, **kwargs)
def nested_both2(*args, **kwargs):
return nested_both(*args, **kwargs)
# invalid commands, may return whatever.
#? list
nested_both('', b=1, c=1.0, list)
#? list
nested_both('', c=1.0, b=1, list)
#? []
nested_both('').
#? int()
nested_both2('', b=1, c=1.0)
#? int()
nested_both2('', c=1.0, b=1)
#? []
nested_both2('').
# -----------------
# nested *args/**kwargs with a default arg
# -----------------
def function_def(a, b, c):
return a, b
def nested_def(a, *args, **kwargs):
return function_def(a, *args, **kwargs)
def nested_def2(*args, **kwargs):
return nested_def(*args, **kwargs)
#? str()
nested_def2('', 1, 1.0)[0]
#? str()
nested_def2('', b=1, c=1.0)[0]
#? str()
nested_def2('', c=1.0, b=1)[0]
#? int()
nested_def2('', 1, 1.0)[1]
#? int()
nested_def2('', b=1, c=1.0)[1]
#? int()
nested_def2('', c=1.0, b=1)[1]
#? []
nested_def2('')[1].
# -----------------
# magic methods
# -----------------
def a(): pass
#? ['__closure__']
a.__closure__
| Something |
python | pandas-dev__pandas | pandas/tests/indexing/test_partial.py | {
"start": 335,
"end": 8178
} | class ____:
def test_empty_frame_setitem_index_name_retained(self):
# GH#31368 empty frame has non-None index.name -> retained
df = DataFrame({}, index=pd.RangeIndex(0, name="df_index"))
series = Series(1.23, index=pd.RangeIndex(4, name="series_index"))
df["series"] = series
expected = DataFrame(
{"series": [1.23] * 4},
index=pd.RangeIndex(4, name="df_index"),
columns=Index(["series"]),
)
tm.assert_frame_equal(df, expected)
def test_empty_frame_setitem_index_name_inherited(self):
# GH#36527 empty frame has None index.name -> not retained
df = DataFrame()
series = Series(1.23, index=pd.RangeIndex(4, name="series_index"))
df["series"] = series
expected = DataFrame(
{"series": [1.23] * 4},
index=pd.RangeIndex(4, name="series_index"),
columns=Index(["series"]),
)
tm.assert_frame_equal(df, expected)
def test_loc_setitem_zerolen_series_columns_align(self):
# columns will align
df = DataFrame(columns=["A", "B"])
df.loc[0] = Series(1, index=range(4))
expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64)
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=["A", "B"])
df.loc[0] = Series(1, index=["B"])
exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64")
tm.assert_frame_equal(df, exp)
def test_loc_setitem_zerolen_list_length_must_match_columns(self):
# list-like must conform
df = DataFrame(columns=["A", "B"])
msg = "cannot set a row with mismatched columns"
with pytest.raises(ValueError, match=msg):
df.loc[0] = [1, 2, 3]
df = DataFrame(columns=["A", "B"])
df.loc[3] = [6, 7] # length matches len(df.columns) --> OK!
exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64)
tm.assert_frame_equal(df, exp)
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
msg = "cannot set a frame with no defined columns"
with pytest.raises(ValueError, match=msg):
df.loc[1] = 1
with pytest.raises(ValueError, match=msg):
df.loc[1] = Series([1], index=["foo"])
msg = "cannot set a frame with no defined index and a scalar"
with pytest.raises(ValueError, match=msg):
df.loc[:, 1] = 1
def test_partial_set_empty_frame2(self):
# these work as they don't really change
# anything but the index
# GH#5632
expected = DataFrame(columns=Index(["foo"]), index=Index([], dtype="object"))
df = DataFrame(index=Index([], dtype="object"))
df["foo"] = Series([], dtype="object")
tm.assert_frame_equal(df, expected)
df = DataFrame(index=Index([]))
df["foo"] = Series(df.index)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=Index([]))
df["foo"] = df.index
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame3(self):
expected = DataFrame(columns=Index(["foo"]), index=Index([], dtype="int64"))
expected["foo"] = expected["foo"].astype("float64")
df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = []
tm.assert_frame_equal(df, expected)
df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = Series(np.arange(len(df)), dtype="float64")
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame4(self):
df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = range(len(df))
expected = DataFrame(columns=Index(["foo"]), index=Index([], dtype="int64"))
# range is int-dtype-like, so we get int64 dtype
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame5(self):
df = DataFrame()
tm.assert_index_equal(df.columns, pd.RangeIndex(0))
df2 = DataFrame()
df2[1] = Series([1], index=["foo"])
df.loc[:, 1] = Series([1], index=["foo"])
tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1]))
tm.assert_frame_equal(df, df2)
def test_partial_set_empty_frame_no_index(self):
# no index to start
expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0])
df = DataFrame(columns=["A", "B"])
df[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=["A", "B"])
df.loc[:, 0] = Series(1, index=range(4))
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH#5720, GH#5744
# don't create rows when empty
expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64"))
expected["A"] = expected["A"].astype("int64")
expected["B"] = expected["B"].astype("float64")
expected["New"] = expected["New"].astype("float64")
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y["New"] = np.nan
tm.assert_frame_equal(y, expected)
expected = DataFrame(columns=["a", "b", "c c", "d"])
expected["d"] = expected["d"].astype("int64")
df = DataFrame(columns=["a", "b", "c c"])
df["d"] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=["A", "B", "C"])
expected = DataFrame(columns=["A", "B", "C"])
expected["A"] = expected["A"].astype("int64")
expected["B"] = expected["B"].astype("float64")
expected["C"] = expected["C"].astype("float64")
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH#5756
# setting with empty Series
df = DataFrame(Series(dtype=object))
expected = DataFrame({0: Series(dtype=object)})
tm.assert_frame_equal(df, expected)
df = DataFrame(Series(name="foo", dtype=object))
expected = DataFrame({"foo": Series(dtype=object)})
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH#5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df["a"] = 0
expected = DataFrame(0, index=[0], columns=Index(["a"]))
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self, using_infer_string):
# GH#6171
# consistency on empty frames
df = DataFrame(columns=["x", "y"])
df["x"] = [1, 2]
expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]})
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=["x", "y"])
df["x"] = ["1", "2"]
expected = DataFrame(
{
"x": Series(
["1", "2"],
dtype=object if not using_infer_string else "str",
),
"y": Series([np.nan, np.nan], dtype=object),
}
)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=["x", "y"])
df.loc[0, "x"] = 1
expected = DataFrame({"x": [1], "y": [np.nan]})
tm.assert_frame_equal(df, expected, check_dtype=False)
| TestEmptyFrameSetitemExpansion |
python | facebookresearch__faiss | tests/test_rabitq.py | {
"start": 3540,
"end": 5945
} | class ____:
"""straightforward IVF implementation"""
def __init__(self, d, nlist, Bq=4):
self.d = d
self.nlist = nlist
self.invlists = [ReferenceRabitQ(d, Bq) for _ in range(nlist)]
self.quantizer = None
self.nprobe = 1
def train(self, xtrain, P):
if self.quantizer is None:
km = faiss.Kmeans(self.d, self.nlist, niter=10)
km.train(xtrain)
centroids = km.centroids
self.quantizer = faiss.IndexFlatL2(self.d)
self.quantizer.add(centroids)
else:
centroids = self.quantizer.reconstruct_n()
# Override the RabitQ train() to use a common random rotation
# and force centroids from the coarse quantizer
for list_no, rq in enumerate(self.invlists):
rq.centroid = centroids[list_no]
rq.P = P
def add(self, x):
_, keys = self.quantizer.search(x, 1)
keys = keys.ravel()
n_per_invlist = np.bincount(keys, minlength=self.nlist)
order = np.argsort(keys)
i0 = 0
for list_no, rab in enumerate(self.invlists):
i1 = i0 + n_per_invlist[list_no]
rab.list_size = i1 - i0
if i1 > i0:
ids = order[i0:i1]
rab.ids = ids
rab.add(x[ids])
i0 = i1
def search(self, x, k):
nq = len(x)
nprobe = self.nprobe
D = np.zeros((nq, k), dtype="float32")
I = np.zeros((nq, k), dtype=int)
D[:] = np.nan
I[:] = -1
_, Ic = self.quantizer.search(x, nprobe)
for qno, xq in enumerate(x):
# naive top-k implemetation with a full sort
q_dis = []
q_ids = []
for probe in range(nprobe):
rab = self.invlists[Ic[qno, probe]]
if rab.list_size == 0:
continue
# we cannot exploit the batch version
# of the queries (in this form)
dis = rab.distances(xq[None, :])
q_ids.append(rab.ids)
q_dis.append(dis.ravel())
q_dis = np.hstack(q_dis)
q_ids = np.hstack(q_ids)
o = q_dis.argsort()
kq = min(k, len(q_dis))
D[qno, :kq] = q_dis[o[:kq]]
I[qno, :kq] = q_ids[o[:kq]]
return D, I
| ReferenceIVFRabitQ |
python | pytorch__pytorch | test/dynamo/test_dicts.py | {
"start": 1096,
"end": 34914
} | class ____(torch._dynamo.test_case.TestCase):
def test_dict_subclass_instantiation(self):
def fn(x):
sd = SimpleDict(x=5)
return sd["x"] * x
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_dict_subclass_local_mutation(self):
def fn(x):
sd = SimpleDict(x=5)
z = sd["x"] * x
sd["x"] = 10
return z * sd["x"]
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_dict_contains_enum(self):
class TensorDim(str, enum.Enum):
DDP = "ddp"
FSDP = "fsdp"
CP = "cp"
TP = "tp"
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
val = x.sin()
if TensorDim.DDP in {"ddp"}:
val += x.cos()
if "ddp" in {TensorDim.DDP}:
val += x.cos()
return val
inp = torch.randn(4, 4)
mod = Foo()
opt_f = torch.compile(mod)
self.assertEqual(mod(inp), opt_f(inp))
def test_dict_subclass_local_with_non_dict_method(self):
# Checks that add_1 method is inlined
class MethodDict(dict):
def add_1(self, x):
return x + 1
def fn(x):
sd = MethodDict(x=5)
z = sd["x"] * x
sd["x"] = 10
return sd.add_1(z * sd["x"])
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_dict_contains(self):
sd = dict()
sd[2] = 5
sd[4] = 10
def fn(x):
if 1 in sd:
x = x * 2
else:
x = x * 3
return x
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
# Ensure a recompilation
sd[1] = 15
self.assertEqual(fn(x), opt_fn(x))
# Ensure not recompilation because the traced program remains same here.
sd[2] = 10
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
self.assertEqual(fn(x), opt_fn(x))
def test_dict_subclass_methods_fallback_readonly(self):
sd = SimpleDict()
sd[2] = 5
sd[4] = 10
# check that regular attr accesses work well
sd.attr = 4
def fn(x):
for value in sd.values():
x = x * value
for key in sd:
x = x * key
for k, v in sd.items():
x = x * k
x = x * v
# for k in sd:
# x = x * k
if 1 in sd:
x = x * 2
else:
x = x * 3
x = x * sd.get(2, 0)
x = x * sd.get(3, 4)
x = len(sd) * x
x = x * sd.attr
return x
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
# Ensure a recompilation
sd[6] = 15
self.assertEqual(fn(x), opt_fn(x))
def test_dict_subclass_instantiation_return(self):
def fn(x):
sd = SimpleDict(x=5 * x)
sd["y"] = 10
return sd
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(type(ref), type(res))
self.assertEqual(ref["x"], res["x"])
self.assertEqual(ref["y"], res["y"])
def test_dict_subclass_methods_fallback_mutation(self):
def fn(sd, x):
for value in sd.values():
x = x * value
sd[6] = 14
for key in sd:
x = x * key
for k, v in sd.items():
x = x * k
x = x * v
# for k in sd:
# x = x * k
if 1 in sd:
x = x * 2
else:
x = x * 3
x = x * sd.get(2, 0)
x = x * sd.get(3, 4)
x = len(sd) * x
return x
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
sd1 = SimpleDict()
sd1[2] = 5
sd1[4] = 10
sd2 = SimpleDict()
sd2[2] = 5
sd2[4] = 10
self.assertTrue(sd1 == sd2)
self.assertEqual(fn(sd1, x), opt_fn(sd2, x))
self.assertTrue(sd1 == sd2)
def test_dict_subclass_setitem(self):
class SetItemDict(dict):
def __setitem__(self, key, value):
super().__setitem__(key, value + 1)
def fn(x):
sd = SetItemDict(x=5 * x)
sd["y"] = 10
return sd
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(type(ref), type(res))
self.assertEqual(ref["x"], res["x"])
self.assertEqual(ref["y"], res["y"])
def test_custom_iter_dict(self):
class ReversedDict(dict):
def __iter__(self):
return reversed(list(self.keys()))
d = {
"foo": 1,
"bar": 2,
}
d = ReversedDict(d)
@torch.compile(backend="eager")
def fn(x, d):
# Forces side effects attribute reapplication logic
d.sample = 1
d["baz"] = 4
return x * d["foo"] * d["bar"]
fn(torch.randn(4), d)
# This is intentional because the dict is mutated, so we will have a recompilation.
fn(torch.randn(4), d)
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
fn(torch.randn(4), d)
def test_custom_keys_iter_dict(self):
class ReversedDict(dict):
def keys(self):
return ["bar", "foo"]
d = {
"foo": 1,
"bar": 2,
}
d = ReversedDict(d)
@torch.compile(backend="eager")
def fn(x, d):
return x * d["foo"] * d["bar"]
fn(torch.randn(4), d)
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
fn(torch.randn(4), d)
def test_dict_guard_on_keys_order(self):
d = {
2: 4,
3: 5,
}
cnts = torch._dynamo.testing.CompileCounter()
def fn(x, d):
for key, value in d.items():
x = x * key + value
return x
opt_fn = torch.compile(fn, backend=cnts)
opt_fn(torch.randn(4), d)
opt_fn(torch.randn(4), d)
# No recompilation
self.assertEqual(cnts.frame_count, 1)
# move 2 to the end
d[2] = d.pop(2)
x = torch.randn(4)
res = opt_fn(x, d)
# Check recompilation
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(res, fn(x, d))
def test_dict_guard_on_keys_order2(self):
d = {
2: 4,
3: 5,
}
cnts = torch._dynamo.testing.CompileCounter()
def fn(x, d):
for key in d:
value = d[key]
x = x * key + value
return x
opt_fn = torch.compile(fn, backend=cnts)
opt_fn(torch.randn(4), d)
opt_fn(torch.randn(4), d)
# No recompilation
self.assertEqual(cnts.frame_count, 1)
# move 2 to the end
d[2] = d.pop(2)
x = torch.randn(4)
res = opt_fn(x, d)
# Check recompilation
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(res, fn(x, d))
def test_ordered_dict_reordered_keys(self):
d = OrderedDict()
d[2] = 4
d[3] = 5
d.move_to_end(2)
cnts = torch._dynamo.testing.CompileCounter()
def fn(x, d):
y = 0
for idx, value in enumerate(d.values()):
if idx == 0:
y += torch.sin(x * value)
else:
y += torch.cos(x * value)
return y
opt_fn = torch.compile(fn, backend=cnts)
x = torch.randn(4)
self.assertEqual(opt_fn(x, d), fn(x, d))
def test_ordered_dict_subclass_reordered_keys(self):
class ODSubclass(OrderedDict):
def keys(self):
return super().keys()
d = ODSubclass()
d[2] = 4
d[3] = 5
d.move_to_end(2)
cnts = torch._dynamo.testing.CompileCounter()
def fn(x, d):
y = 0
for idx, value in enumerate(d.values()):
if idx == 0:
y += torch.sin(x * value)
else:
y += torch.cos(x * value)
return y
opt_fn = torch.compile(fn, backend=cnts)
x = torch.randn(4)
self.assertEqual(opt_fn(x, d), fn(x, d))
def test_lazy_key_guarding(self):
d = {"a": 2, "b": 3, "c": 5}
def fn(x):
return x * d["a"]
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
# Since key c was not used, it should not lead to a recompilation
d.pop("c")
d["d"] = 10
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_lazy_key_non_const_guarding(self):
d = {
list: 2,
dict: 3,
OrderedDict: 5,
namedtuple: 7,
}
def fn(x):
return x * d[list]
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
# Since key c was not used, it should not lead to a recompilation
d.pop(dict)
d[defaultdict] = 10
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_dict_mutation_side_effect(self):
def fn(d):
d["c"] = d["a"] + d.pop("b")
return d
args1 = {"a": torch.randn(10), "b": torch.randn(10)}
args2 = dict(args1)
assert fn(args1) is args1
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertIs(opt_fn(args2), args2)
self.assertTrue(same(args1, args2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 1)
def test_dict_copy_alias(self):
@torch.compile(backend="eager", fullgraph=True)
def run(x, d0):
d1 = d0.copy()
d1[0] = 1
return x + 1, d1
d0 = {}
res, d1 = run(torch.zeros(1), d0)
self.assertTrue(same(res, torch.ones(1)))
self.assertEqual(d0, {})
self.assertEqual(d1, {0: 1})
def test_dict_subclass_get_method(self):
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
config = dotdict({"a": 1, "b": 2})
def fn(x):
x2 = x * 2 # noqa: F841
x3 = x * config.get("a", 3)
return x3
x = torch.randn(2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_dict_order_keys(self):
def fn(d):
c = 0
for v in d.values():
c += v
return c
args1 = {}
args1["a"] = torch.rand(10)
args1["b"] = torch.rand(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(args1), opt_fn(args1))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
# A different order of keys recompiles
args2 = {}
args2["b"] = args1["b"]
args2["a"] = args1["a"]
self.assertEqual(fn(args2), opt_fn(args2))
self.assertEqual(cnts.frame_count, 2)
# Extra calls don't recompile
self.assertEqual(cnts.frame_count, 2)
def test_dict_namedtuple(self):
def fn(d):
if namedtuple in d:
return d[3] * 2
else:
return d[3] * 3
args1 = {namedtuple: None, 3: torch.randn(3)}
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(args1), opt_fn(args1))
self.assertEqual(cnts.frame_count, 1)
# Test a failing namedtuple guard
args2 = {2: None, 3: torch.randn(3)}
self.assertEqual(fn(args2), opt_fn(args2))
self.assertEqual(cnts.frame_count, 2)
def test_dict_order_keys_tensors(self):
def fn(d, x):
return d[x] + 3
args1 = {}
x = torch.randn(10)
y = torch.randn(10)
z = torch.randn(10)
args1[x] = y
args1[3] = z
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(args1, x), opt_fn(args1, x))
self.assertEqual(cnts.frame_count, 1)
# Calling again doesn't recompile (same id and key order)
opt_fn(args1, x)
self.assertEqual(cnts.frame_count, 1)
args2 = {}
args2[3] = z
args2[x] = y
# Different order recompiles
self.assertEqual(fn(args2, x), opt_fn(args2, x))
self.assertEqual(cnts.frame_count, 2)
def test_dict_order_keys_modules(self):
def fn(d, x):
return d[x](torch.ones(2, 2))
args1 = {}
x = torch.nn.Linear(2, 2)
y = torch.nn.Linear(2, 2)
z = torch.nn.Linear(2, 2)
args1[x] = y
args1[3] = z
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertEqual(fn(args1, x), opt_fn(args1, x))
self.assertEqual(cnts.frame_count, 1)
# Calling again doesn't recompile (same id and key order)
opt_fn(args1, x)
self.assertEqual(cnts.frame_count, 1)
args2 = {}
args2[3] = z
args2[x] = y
# Different order recompiles
self.assertEqual(fn(args2, x), opt_fn(args2, x))
self.assertEqual(cnts.frame_count, 2)
def test_contains_dunder_dict(self):
class UserDefined:
def __init__(self) -> None:
self.a = 3
self.b = 5
def run(self, x):
if "a" in self.__dict__:
x = x * self.a
if "b" in self.__dict__:
x = x * self.b
self.c = 7
if "c" in self.__dict__:
x = x * self.c
return x * self.__dict__.get("a") * self.__dict__.get("z", 2)
obj = UserDefined()
def fn(x):
return obj.run(x)
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_contains_module_dunder_dict(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = 1
self.bar = 2
self.baz = 3
def forward(self, x):
if "foo" in self.__dict__:
return x * self.bar
return x * self.baz
mod = MyModule()
x = torch.randn(10)
opt_mod = torch.compile(mod, backend="eager", fullgraph=True)
self.assertEqual(mod(x), opt_mod(x))
def test_update_dunder_dict(self):
class UserDefined:
def run(self, x):
self.__dict__["a"] = 10
return x * self.a + self.__dict__["a"]
obj1 = UserDefined()
obj2 = UserDefined()
def fn(x, obj):
return obj.run(x)
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
ref = fn(x, obj1)
res = opt_fn(x, obj2)
self.assertEqual(ref, res)
# Make sure only `a` is updated.
self.assertEqual(obj1.__dict__, obj2.__dict__)
def test_update_module_dunder_dict(self):
    """Same as test_update_dunder_dict but mutating an nn.Module's __dict__ in forward."""

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x):
            self.__dict__["a"] = 10
            return x * self.a + self.__dict__["a"]

    mod = MyModule()
    x = torch.randn(10)
    opt_mod = torch.compile(mod, backend="eager", fullgraph=True)
    self.assertEqual(mod(x), opt_mod(x))
def test_dict_reconstruct_keeps_original_order(self):
    """Reconstructed OrderedDict/ModuleDict preserve insertion order after update()."""

    def fn():
        modules = OrderedDict([("act", torch.nn.ReLU())])
        module_dict = torch.nn.ModuleDict(modules)
        next_modules = {"fc4": torch.nn.Linear(5, 6), "act3": torch.nn.Sigmoid()}
        modules.update(next_modules.items())
        module_dict.update(next_modules)
        return modules, module_dict

    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch.compile(fn, backend=cnts)
    modules, module_dict = opt_fn()
    self.assertEqual(len(module_dict), len(modules))
    # Same key order, and identical module objects in both containers.
    for k1, m2 in zip(modules, module_dict.children()):
        self.assertTrue(modules[k1] is m2)
# FIXME: see comment in torch/_dynamo/polyfills/__init__.py:mutable_mapping_update
@unittest.expectedFailure
def test_dict_construct_from_mapping_like(self):
    """dict(mapping_like, **kwargs) from a FakeMapping (defined elsewhere in this file)."""

    def fn(x):
        fm = FakeMapping(x)
        d = dict(fm, x=x)
        return d

    x = torch.randn(4)
    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    self.assertEqual(fn(x), opt_fn(x))
def test_dict_subclass_initialization_in_graph(self):
    """dict/OrderedDict subclasses with custom __new__/__init__ constructed in-graph."""
    for super_class in (
        OrderedDict,
        dict,
    ):

        class CustomDict(super_class):
            def __new__(cls, *args, **kwargs):
                return super().__new__(cls, *args, **kwargs)

            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)

        def fn(x):
            c = CustomDict()
            c["key"] = x
            assert "key" in c
            return c["key"] + 1

        opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
        x = torch.rand(4)
        self.assertEqual(fn(x), opt_fn(x))
def test_dict_list_values(self):
    """zip(itertools.count(), ...) over a dict's list value, across two call shapes."""

    def inner_fn(args):
        return [x[1].shape for x in args]

    @torch.compile(backend="eager")
    def fn(tensors):
        return inner_fn(zip(itertools.count(), tensors["args"]))

    # No return-value assertions: this only checks that both calls trace
    # (and possibly recompile) without raising.
    fn({"args": [torch.ones(5, 5), torch.ones(5, 6), torch.ones(5, 7)]})
    fn({"args": [torch.ones(5, 5)]})
def test_dict_iter(self):
    """Plain `for key in dict` iteration inside forward sums to the expected total."""

    class MyMod(torch.nn.Module):
        def forward(self, x):
            z = {"my": 1, "const": 2, "dict": 3, "variable": 4}
            tot = 0
            for key in z:
                tot += z[key]
            return tot

    x = torch.tensor([0])
    model = MyMod()
    opt_model = torch.compile(model, backend="eager", fullgraph=True)
    y = opt_model(x)
    # 1 + 2 + 3 + 4
    self.assertEqual(y, 10)
def test_dict_subclass_contains(self):
    """`in` on an OrderedDict subclass selects the right branches."""
    # pattern from huggingface
    class ClassInstantier(OrderedDict):
        pass

    @torch.compile(fullgraph=True, backend="eager")
    def f(x, d):
        if "key1" in d:
            x = x + 2
        if "key2" in d:
            x = x + 4
        x = x + 8
        return x

    # key1 present: 1 + 2 + 8 = 11; key2 present: 1 + 4 + 8 = 13.
    result = f(torch.ones(8), ClassInstantier({"key1": torch.ones(8)}))
    self.assertTrue(same(result, torch.full([8], 11.0)))
    result = f(torch.ones(8), ClassInstantier({"key2": torch.ones(8)}))
    self.assertTrue(same(result, torch.full([8], 13.0)))
def test_dict_tag_guard(self):
    """Guards on dict contents: changing a value, and mutating an object held
    as a value, both keep compiled output equal to eager."""

    class Foo:
        def __init__(self) -> None:
            self.scalar = 10

    def fn(d, x):
        return d["a"] * d["b"] * d["c"].scalar * x

    foo = Foo()
    d = {"a": 2, "b": 3, "c": foo}
    opt_fn = torch.compile(fn, backend="eager")
    inp = torch.randn(3, 3)
    self.assertEqual(fn(d, inp), opt_fn(d, inp))
    d["a"] = 4
    self.assertEqual(fn(d, inp), opt_fn(d, inp))
    # Check that recompilation happens
    foo.scalar = 12
    self.assertEqual(fn(d, inp), opt_fn(d, inp))
def test_empty_dict_recompilation(self):
    """Truthiness of a dict argument is guarded: {} vs non-empty give different branches."""

    def fn(d, x):
        if d:
            return torch.cos(x)
        return torch.sin(x)

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    self.assertEqual(fn({}, x), opt_fn({}, x))
    self.assertEqual(fn({"a": 1}, x), opt_fn({"a": 1}, x))
def test_udf_dict_reconstruction(self):
    """dict.__new__(subclass) built in-graph reconstructs with the right type and attrs."""

    class MyDict(dict):
        pass

    def fn(x, klass):
        x = x * 2
        sc_dict = dict.__new__(klass)
        sc_dict["x"] = x
        if isinstance(sc_dict, MyDict):
            sc_dict.attr = 3
        return sc_dict

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    ref = fn(x, MyDict)
    res = opt_fn(x, MyDict)
    self.assertEqual(ref, res)
    self.assertTrue(isinstance(res, MyDict))
    self.assertEqual(ref.attr, res.attr)
    # Same function, plain dict class: no attr branch taken.
    ref = fn(x, dict)
    res = opt_fn(x, dict)
    self.assertEqual(ref, res)
    self.assertTrue(isinstance(res, dict))
def test_weakref_dict(self):
    """Membership tests against a closed-over WeakKeyDictionary."""
    states = weakref.WeakKeyDictionary()
    mod1 = torch.nn.Module()
    mod2 = torch.nn.Module()
    states[mod1] = 2
    states[mod2] = 3

    def fn(x):
        if mod1 in states:
            x = torch.sin(x)
        if mod2 in states:
            x = torch.cos(x)
        return x

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    self.assertEqual(fn(x), opt_fn(x))
def test_construct_user_dict_and_return(self):
    """A UserDict-style subclass (DummyUserDict, defined elsewhere in this file)
    can be constructed inside the graph and returned."""

    def fn(x):
        return DummyUserDict({"a": x + 1})

    x = torch.randn(4)
    res = fn(x)
    self.assertEqual(res["a"], x + 1)
    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    self.assertEqual(res["a"], opt_fn(x)["a"])
def test_fn_id(self):
    """id(function) can be used as a dict key inside the compiled region."""

    def fn(x, f):
        d = {id(f): 3}
        return x * d[id(f)]

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)

    def nothing():
        pass

    f = nothing
    self.assertEqual(fn(x, f), opt_fn(x, f))
def test_mapping_proxy_for_local(self):
    """A MappingProxyType created in-graph over a local dict reconstructs as a proxy."""

    def fn(x):
        d = {"a": 2, "b": 3, "c": 5 * x}
        mp = types.MappingProxyType(d)
        # y exercises reads through the proxy; the proxy itself is returned.
        y = torch.sin(x * mp["a"])
        for v in mp.values():
            y += torch.cos(x * v)
        return mp

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    ref = fn(x)
    res = opt_fn(x)
    self.assertEqual(ref, res)
    self.assertTrue(type(res) is types.MappingProxyType)
def test_mapping_proxy_for_nonlocal(self):
    """A proxy over a closed-over dict stays live: later writes to the dict show through."""
    d = {"a": 2, "b": 3, "c": 5}

    def fn(x):
        mp = types.MappingProxyType(d)
        y = torch.sin(x * mp["a"])
        for v in mp.values():
            y += torch.cos(x * v)
        d["d"] = 4
        return mp

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    ref = fn(x)
    res = opt_fn(x)
    self.assertEqual(ref, res)
    self.assertTrue(type(res) is types.MappingProxyType)
    # check update to d is reflected in res
    d["e"] = 5
    self.assertEqual(d["e"], res["e"])
def test_mapping_proxy_existing(self):
    """An existing proxy argument tracks mutations of its backing dict between calls."""
    d = {"a": 2, "b": 3, "c": 5}

    def fn(x, mp):
        y = torch.sin(x * mp["a"])
        for v in mp.values():
            y += torch.cos(x * v)
        if isinstance(mp, types.MappingProxyType):
            y *= 2
        return y

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    mp = types.MappingProxyType(d)
    ref = fn(x, mp)
    res = opt_fn(x, mp)
    self.assertEqual(ref, res)
    # Value change in the backing dict.
    d["a"] = 3
    ref = fn(x, mp)
    res = opt_fn(x, mp)
    self.assertEqual(ref, res)
    # Key removal from the backing dict.
    d.pop("b")
    ref = fn(x, mp)
    res = opt_fn(x, mp)
    self.assertEqual(ref, res)
def test_dict_construction_from_mapping_proxy(self):
    """dict(mapping_proxy) materializes a plain dict inside the graph."""
    d = {"a": 2, "b": 3, "c": 5}

    def fn(x, mp):
        d = dict(mp)
        y = torch.sin(x * d["a"])
        return y

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    mp = types.MappingProxyType(d)
    ref = fn(x, mp)
    res = opt_fn(x, mp)
    self.assertEqual(ref, res)
def test_mapping_proxy_existing_mutation(self):
    """Mutating the backing dict of a pre-existing proxy is visible through the proxy."""
    d = {"a": 2, "b": 3, "c": 5}
    mp = types.MappingProxyType(d)

    def fn(x):
        d["d"] = 4
        y = torch.sin(x * mp["d"])
        return y

    # Note: no fullgraph here — the backing-dict mutation may fall back.
    opt_fn = torch.compile(fn, backend="eager")
    x = torch.randn(4)
    ref = torch.sin(x * 4)
    res = opt_fn(x)
    self.assertEqual(ref, res)
    self.assertEqual(d.keys(), mp.keys())
def test_mapping_proxy_existing_local_mutation(self):
    """Mutating an unrelated, locally created dict does not break the proxy graph."""
    d = {"a": 2, "b": 3, "c": 5}
    mp = types.MappingProxyType(d)

    def fn(x):
        # Dynamo should not cause a graph break here because it knows that
        # the existing proxy can't point to this new dict
        other_dict = {}
        other_dict["d"] = 4
        y = torch.sin(x * mp["c"])
        return y

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    ref = torch.sin(x * mp["c"])
    res = opt_fn(x)
    self.assertEqual(ref, res)
    self.assertEqual(d.keys(), mp.keys())
def test_move_to_end(self):
    """OrderedDict.move_to_end is traced and reflected in the reconstructed key order."""

    def fn(x):
        d = OrderedDict({"a": torch.cos(x), "b": 3, "c": 5})
        d.move_to_end("a")
        return d

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    self.assertEqual(["b", "c", "a"], list(opt_fn(x).keys()))
    self.assertEqual(fn(x), opt_fn(x))
def test_mapping_proxy_ban_muation_on_dict_realization(self):
    """Mutating a class __dict__ after realizing it via dict(...) must graph-break."""
    # NOTE(review): "muation" in this test's name is a typo for "mutation".

    def fn(x):
        class Foo:
            b = 4

        d = dict(Foo.__dict__)
        y = torch.sin(x) * d["b"]
        # This should cause a graph break, because otherwise the
        # Foo.__dict__ will not be updated.
        Foo.bar = 3
        return Foo, y * Foo.__dict__["bar"]

    opt_fn = torch.compile(fn, backend="eager")
    x = torch.randn(4)
    foo1, ref = fn(x)
    foo2, res = opt_fn(x)
    self.assertEqual(ref, res)
    self.assertEqual(foo1.bar, foo2.bar)
def test_overridden_get_item(self):
    """A dict subclass overriding __getitem__ (with a side-effecting counter) is
    invoked the same number of times compiled as eager."""

    class MyDict(dict):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.calls = 0

        def __getitem__(self, key):
            self.calls += 1
            return super().__getitem__(key) + 1

    def fn(x, d):
        d["d"] = 4
        return x * d["a"] + d["b"] + d["c"] + d["d"]

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    x = torch.randn(4)
    d1 = MyDict({"a": 2, "b": 3, "c": 5})
    ref = fn(x, d1)
    d2 = MyDict({"a": 2, "b": 3, "c": 5})
    res = opt_fn(x, d2)
    self.assertEqual(ref, res)
    self.assertEqual(d1.calls, d2.calls)
def test_items_type(self):
    """d.items() returned from the graph has the real dict_items type."""

    def fn():
        d = dict({"a": 1, "b": "2", "c": torch.tensor(3)})  # noqa: C418
        return d.items()

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    ref = fn()
    res = opt_fn()
    self.assertEqual(ref, res)
    self.assertEqual(type(res), dict_items)
def test_builtin_or_with_invalid_types(self):
    """dict | <non-mapping> raises inside the graph, surfaced as an Observed exception."""
    args = (
        1,  # int
        1.0,  # float
        "a",  # str
        (1, 2),  # tuple
        [1, 2],  # list
    )

    @torch.compile(backend="eager", fullgraph=True)
    def fn(b: Any):
        a = {"one": torch.ones(1)}
        return a | b

    from torch._dynamo.exc import Unsupported

    for arg in args:
        with self.assertRaisesRegex(Unsupported, "Observed exception"):
            _ = fn(arg)
def test_builtin_or_with_diff_keys(self):
    """dict union (both operand orders, operator and __or__ forms) with disjoint keys."""

    def f():
        a = {"one": torch.ones(1)}
        b = {"two": torch.ones(2)}
        return a, b, a | b, b | a, a.__or__(b), b.__or__(a)

    opt_f = torch.compile(f, backend="eager", fullgraph=True)
    self.assertEqual(f(), opt_f())
def test_builtin_or_with_same_keys(self):
    """dict union with an overlapping key — right-hand side wins, both orders checked."""

    def f():
        a = {"one": torch.ones(1), "two": torch.ones(2)}
        b = {"one": torch.ones(1), "three": torch.ones(3)}
        return a, b, a | b, b | a, a.__or__(b), b.__or__(a)

    opt_f = torch.compile(f, backend="eager", fullgraph=True)
    self.assertEqual(f(), opt_f())
def test_builtin_ior_(self):
    """In-place |= mutates the left dict and leaves the right untouched."""

    def f():
        a = {"one": torch.ones(1)}
        b = {"two": torch.ones(2)}
        a |= b
        return a, b

    opt_f = torch.compile(f, backend="eager", fullgraph=True)
    self.assertEqual(f(), opt_f())
def test_newly_constructed_default_dict(self):
    """A defaultdict created inside the graph reconstructs with its contents."""

    def f(x):
        d = defaultdict(list)
        d[0] = [
            42,
        ]
        return x + 1, d

    x = torch.ones(2)
    ref = f(x)
    res = torch.compile(f, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)
def test_newly_constructed_default_dict_no_default_factory(self):
    """defaultdict with no (or None) default_factory raises KeyError on missing
    keys, matching plain-dict semantics, in all construction variants."""

    # Variant 1: no factory argument at all.
    def f1(x):
        d = defaultdict()
        try:
            d[1] += 42
        except KeyError:
            d[1] = 1
        return x + 1, d

    x = torch.ones(2)
    ref = f1(x)
    res = torch.compile(f1, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)

    # Variant 2: explicit None factory.
    def f2(x):
        d = defaultdict(None)
        try:
            d[1] += 42
        except KeyError:
            d[1] = 1
        return x + 1, d

    ref = f2(x)
    res = torch.compile(f2, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)

    # Variant 3: None factory plus a seed mapping — present key mutates,
    # missing key still raises.
    def f3(x):
        d = defaultdict(None, {1: 10})
        d[1] += 42
        try:
            d[2] += 24
        except KeyError:
            d[2] = 1
        return x + 1, d

    ref = f3(x)
    res = torch.compile(f3, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)
def test_newly_constructed_default_dict_with_dict(self):
    """defaultdict(factory, seed_dict, **kwargs) construction forms inside the graph."""

    def f(x):
        d = dict([("a", 1), ("b", 2)], c=3)  # noqa: C406
        dd = defaultdict(list, d, d=4, e=5)
        dd["x"].append(42)
        return x + 1, d, dd

    x = torch.ones(2)
    ref = f(x)
    res = torch.compile(f, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)
def test_iter_default_dict(self):
    """Iterating over and mutating values of an in-graph defaultdict."""

    def f(x):
        d = defaultdict(list)
        d[0] = 42
        for k in d:
            d[k] += 1
        return x + 1, d

    x = torch.ones(2)
    ref = f(x)
    res = torch.compile(f, backend="eager", fullgraph=True)(x)
    self.assertEqual(ref, res)
@parametrize("op", ["or_", "and_", "xor", "sub"])
def test_dict_keys_binop(self, op):
    """Set-algebra binary operators on dict_keys views, both operand orders."""
    op = getattr(operator, op)

    def f():
        a = {"one": torch.ones(1), "two": torch.ones(2)}
        b = {"one": torch.ones(1), "three": torch.ones(3)}
        return op(a.keys(), b.keys()), op(b.keys(), a.keys())

    opt_f = torch.compile(f, backend="eager", fullgraph=True)
    self.assertEqual(f(), opt_f())
@parametrize("op", ["ior", "iand", "ixor", "isub"])
def test_dict_keys_inplace_binop(self, op):
    """In-place set operators applied to dict_keys views."""
    op = getattr(operator, op)

    def f():
        a = {"one": torch.ones(1), "two": torch.ones(2)}.keys()
        b = {"one": torch.ones(1), "three": torch.ones(3)}.keys()
        c = {"one": torch.ones(1), "two": torch.ones(2)}.keys()
        a = op(a, b)
        b = op(b, c)
        return a, b

    opt_f = torch.compile(f, backend="eager", fullgraph=True)
    self.assertEqual(f(), opt_f())
# Expand the @parametrize-d tests above into concrete test methods.
instantiate_parametrized_tests(DictTests)
| DictTests |
python | facebook__pyre-check | client/commands/servers.py | {
"start": 883,
"end": 1005
class ____(TypedDict):
    """Typed shape of one server-info JSON record (class name elided in this chunk)."""

    # Presumably the process id of the running server — TODO confirm against producer.
    pid: int
    # Version string reported for the server.
    version: str
    # Nested configuration record (schema declared elsewhere in this file).
    configuration: _ServerConfigurationJSONSchema
| _ServerInfoJSONSchema |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/logging_ops_logging_level_test.py | {
"start": 985,
"end": 2510
class ____(test.TestCase):
  """Checks tf.print routed to tf_logging.* against the captured stderr.

  Each test prints a tensor through `logging_ops.print_v2` with a different
  `output_stream` and asserts the capture contains the expected log-severity
  letter ("I"/"W"/"E") and the summarized tensor text.
  """

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensorLogInfo(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(
            tensor, output_stream=tf_logging.info)
        self.evaluate(print_op)
      self.assertTrue("I" in printed.contents())
      # range(10) is summarized: head and tail with an ellipsis.
      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue(expected in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensorLogWarning(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(
            tensor, output_stream=tf_logging.warning)
        self.evaluate(print_op)
      self.assertTrue("W" in printed.contents())
      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue(expected in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensorLogError(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(
            tensor, output_stream=tf_logging.error)
        self.evaluate(print_op)
      self.assertTrue("E" in printed.contents())
      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue(expected in printed.contents())
# Allow running this test file directly.
if __name__ == "__main__":
  test.main()
| PrintV2LoggingLevelTest |
python | tensorflow__tensorflow | tensorflow/python/ops/io_ops.py | {
"start": 7254,
"end": 14749
class ____:
  """Base class for different Reader types, that produce a record every step.

  Conceptually, Readers convert string 'work units' into records (key,
  value pairs). Typically the 'work units' are filenames and the
  records are extracted from the contents of those files. We want a
  single record produced per step, but a work unit can correspond to
  many records.

  Therefore we introduce some decoupling using a queue. The queue
  contains the work units and the Reader dequeues from the queue when
  it is asked to produce a record (via Read()) but it has finished the
  last work unit.

  @compatibility(eager)
  Readers are not compatible with eager execution. Instead, please
  use `tf.data` to get data into your model.
  @end_compatibility
  """

  def __init__(self, reader_ref, supports_serialize=False):
    """Creates a new ReaderBase.

    Args:
      reader_ref: The operation that implements the reader.
      supports_serialize: True if the reader implementation can
        serialize its state.

    Raises:
      RuntimeError: If eager execution is enabled.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "Readers are not supported when eager execution is enabled. "
          "Instead, please use tf.data to get data into your model.")

    self._reader_ref = reader_ref
    self._supports_serialize = supports_serialize

  @property
  def reader_ref(self):
    """Op that implements the reader."""
    return self._reader_ref

  def read(self, queue, name=None):
    """Returns the next record (key, value) pair produced by a reader.

    Will dequeue a work unit from queue if necessary (e.g. when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      name: A name for the operation (optional).

    Returns:
      A tuple of Tensors (key, value).
      key: A string scalar Tensor.
      value: A string scalar Tensor.
    """
    if isinstance(queue, tensor_lib.Tensor):
      queue_ref = queue
    else:
      queue_ref = queue.queue_ref
    # Every method below branches the same way: resource-dtype readers use
    # the v2 kernels; ref-dtype (pre-resource) readers use the v1 kernels.
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name)
    else:
      # For compatibility with pre-resource queues, create a ref(string) tensor
      # which can be looked up as the same queue by a resource manager.
      old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)
      return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name)

  def read_up_to(self, queue, num_records,  # pylint: disable=invalid-name
                 name=None):
    """Returns up to num_records (key, value) pairs produced by a reader.

    Will dequeue a work unit from queue if necessary (e.g., when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).
    It may return less than num_records even before the last batch.

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      num_records: Number of records to read.
      name: A name for the operation (optional).

    Returns:
      A tuple of Tensors (keys, values).
      keys: A 1-D string Tensor.
      values: A 1-D string Tensor.
    """
    if isinstance(queue, tensor_lib.Tensor):
      queue_ref = queue
    else:
      queue_ref = queue.queue_ref
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_read_up_to_v2(self._reader_ref,
                                             queue_ref,
                                             num_records,
                                             name=name)
    else:
      # For compatibility with pre-resource queues, create a ref(string) tensor
      # which can be looked up as the same queue by a resource manager.
      old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)
      return gen_io_ops.reader_read_up_to(self._reader_ref,
                                          old_queue_op,
                                          num_records,
                                          name=name)

  def num_records_produced(self, name=None):
    """Returns the number of records this reader has produced.

    This is the same as the number of Read executions that have
    succeeded.

    Args:
      name: A name for the operation (optional).

    Returns:
      An int64 Tensor.
    """
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_num_records_produced_v2(self._reader_ref,
                                                       name=name)
    else:
      return gen_io_ops.reader_num_records_produced(self._reader_ref,
                                                    name=name)

  def num_work_units_completed(self, name=None):
    """Returns the number of work units this reader has finished processing.

    Args:
      name: A name for the operation (optional).

    Returns:
      An int64 Tensor.
    """
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_num_work_units_completed_v2(self._reader_ref,
                                                           name=name)
    else:
      return gen_io_ops.reader_num_work_units_completed(self._reader_ref,
                                                        name=name)

  def serialize_state(self, name=None):
    """Produce a string tensor that encodes the state of a reader.

    Not all Readers support being serialized, so this can produce an
    Unimplemented error.

    Args:
      name: A name for the operation (optional).

    Returns:
      A string Tensor.
    """
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)
    else:
      return gen_io_ops.reader_serialize_state(self._reader_ref, name=name)

  def restore_state(self, state, name=None):
    """Restore a reader to a previously saved state.

    Not all Readers support being restored, so this can produce an
    Unimplemented error.

    Args:
      state: A string Tensor.
        Result of a SerializeState of a Reader with matching type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_restore_state_v2(
          self._reader_ref, state, name=name)
    else:
      return gen_io_ops.reader_restore_state(self._reader_ref, state, name=name)

  @property
  def supports_serialize(self):
    """Whether the Reader implementation can serialize its state."""
    return self._supports_serialize

  def reset(self, name=None):
    """Restore a reader to its initial clean state.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    if self._reader_ref.dtype == dtypes.resource:
      return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)
    else:
      return gen_io_ops.reader_reset(self._reader_ref, name=name)
# Reader ops produce string/int outputs; none of them have gradients.
ops.NotDifferentiable("ReaderRead")
ops.NotDifferentiable("ReaderReadUpTo")
ops.NotDifferentiable("ReaderNumRecordsProduced")
ops.NotDifferentiable("ReaderNumWorkUnitsCompleted")
ops.NotDifferentiable("ReaderSerializeState")
ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")
@tf_export(v1=["WholeFileReader"])
| ReaderBase |
python | ansible__ansible | lib/ansible/modules/file.py | {
"start": 8903,
"end": 39902
class ____(AnsibleModuleError):
    """Module-specific error subclass (name elided in this chunk of the file)."""
    pass
def additional_parameter_handling(params):
    """Additional parameter validation and reformatting.

    Mutates ``params`` in place: rewrites ``path`` to point at a file inside an
    existing directory, defaults ``state`` from the current filesystem state,
    and fails fast on invalid recurse/src combinations.
    """
    # When path is a directory, rewrite the pathname to be the file inside of the directory
    # TODO: Why do we exclude link?  Why don't we exclude directory?  Should we exclude touch?
    # I think this is where we want to be in the future:
    # when isdir(path):
    # if state == absent:  Remove the directory
    # if state == touch:   Touch the directory
    # if state == directory: Assert the directory is the same as the one specified
    # if state == file:    place inside of the directory (use _original_basename)
    # if state == link:    place inside of the directory (use _original_basename.  Fallback to src?)
    # if state == hard:    place inside of the directory (use _original_basename.  Fallback to src?)
    if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
        basename = None

        # Prefer the basename recorded by the action plugin; fall back to src.
        if params['_original_basename']:
            basename = params['_original_basename']
        elif params['src']:
            basename = os.path.basename(params['src'])

        if basename:
            params['path'] = os.path.join(params['path'], basename)

    # state should default to file, but since that creates many conflicts,
    # default state to 'current' when it exists.
    prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))

    if params['state'] is None:
        if prev_state != 'absent':
            params['state'] = prev_state
        elif params['recurse']:
            params['state'] = 'directory'
        else:
            params['state'] = 'file'

    # make sure the target path is a directory when we're doing a recursive operation
    if params['recurse'] and params['state'] != 'directory':
        module.fail_json(
            msg="recurse option requires state to be 'directory'",
            path=params["path"]
        )

    # Fail if 'src' but no 'state' is specified
    if params['src'] and params['state'] not in ('link', 'hard'):
        module.fail_json(
            msg="src option requires state to be 'link' or 'hard'",
            path=params['path']
        )
def get_state(path):
    """Classify *path* as 'link', 'directory', 'hard', 'file' or 'absent'."""
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        if not os.path.lexists(b_path):
            return 'absent'
        if os.path.islink(b_path):
            return 'link'
        if os.path.isdir(b_path):
            return 'directory'
        if os.stat(b_path).st_nlink > 1:
            return 'hard'
        # could be many other things, but defaulting to file
        return 'file'
    except FileNotFoundError:
        # Raced against a concurrent removal between the checks above.
        return 'absent'
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
    """Walk ``b_path`` and apply ownership/perms/timestamps to every entry.

    Symlinks get their attributes set on the link itself; with ``follow`` the
    link target is chased as well (recursing into target directories).
    Returns True if anything changed.
    """
    changed = False

    try:
        for b_root, b_dirs, b_files in os.walk(b_path):
            for b_fsobj in b_dirs + b_files:
                b_fsname = os.path.join(b_root, b_fsobj)
                if not os.path.islink(b_fsname):
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)

                else:
                    # Change perms on the link
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)

                    if follow:
                        b_fsname = os.path.join(b_root, os.readlink(b_fsname))
                        # The link target could be nonexistent
                        if os.path.exists(b_fsname):
                            if os.path.isdir(b_fsname):
                                # Link is a directory so change perms on the directory's contents
                                changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)

                            # Change perms on the file pointed to by the link
                            tmp_file_args = file_args.copy()
                            tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                            changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                            changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
    except RuntimeError as e:
        # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
        # TODO once this function is moved into the common file utilities, this should probably raise more general exception
        module.fail_json(
            msg=f"Could not recursively set attributes on {to_native(b_path)}. Original error was: '{to_native(e)}'"
        )

    return changed
def initial_diff(path, state, prev_state):
    """Build the before/after diff skeleton for a state transition on *path*."""
    diff = {
        'before': {'path': path},
        'after': {'path': path},
    }

    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state

    # Removing a directory: record everything that is about to disappear.
    if state == 'absent' and prev_state == 'directory':
        walklist = {'directories': [], 'files': []}
        b_path = to_bytes(path, errors='surrogate_or_strict')
        for base_path, sub_folders, files in os.walk(b_path):
            walklist['directories'].extend(
                os.path.join(base_path, name) for name in sub_folders)
            walklist['files'].extend(
                os.path.join(base_path, name) for name in files)
        diff['before']['path_content'] = walklist

    return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
    """Translate the user-supplied time string into an epoch timestamp.

    'preserve' -> None (keep the existing timestamp); 'now' -> Sentinel
    (use the current time); anything else is parsed with *time_format*.
    """
    if formatted_time == 'preserve':
        return None
    if formatted_time == 'now':
        return Sentinel

    try:
        return time.mktime(time.strptime(formatted_time, time_format))
    except (ValueError, OverflowError) as e:
        module.fail_json(
            msg=f"Error while obtaining timestamp for time {formatted_time} using format {time_format}: {to_native(e, nonstring='simplerepr')}",
        )
def update_timestamp_for_file(path, mtime, atime, diff=None):
    """Apply mtime/atime to ``path``.

    ``None`` means preserve the existing value; the ``Sentinel`` marker means
    "now". Returns True when a timestamp was changed (in check mode, would
    have been); records before/after values into ``diff`` when provided.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')

    try:
        # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
        # https://github.com/ansible/ansible/issues/50943
        if mtime is Sentinel and atime is Sentinel:
            # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
            # not be updated. Just use the current time for the diff values
            mtime = atime = time.time()

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            set_time = None
        else:
            # If both parameters are None 'preserve', nothing to do
            if mtime is None and atime is None:
                return False

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            if mtime is None:
                mtime = previous_mtime
            elif mtime is Sentinel:
                mtime = time.time()

            if atime is None:
                atime = previous_atime
            elif atime is Sentinel:
                atime = time.time()

            # If both timestamps are already ok, nothing to do
            if mtime == previous_mtime and atime == previous_atime:
                return False

            set_time = (atime, mtime)

        if not module.check_mode:
            os.utime(b_path, set_time)

        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            if 'after' not in diff:
                diff['after'] = {}
            if mtime != previous_mtime:
                diff['before']['mtime'] = previous_mtime
                diff['after']['mtime'] = mtime
            if atime != previous_atime:
                diff['before']['atime'] = previous_atime
                diff['after']['atime'] = atime
    except OSError as e:
        module.fail_json(
            msg=f"Error while updating modification or access time: {to_native(e, nonstring='simplerepr')}",
            path=path
        )
    return True
def keep_backward_compatibility_on_timestamps(parameter, state):
    """Default an unset mtime/atime parameter: 'preserve' for states that act on
    an existing entity, 'now' for touch; anything else passes through."""
    if parameter is not None:
        return parameter
    if state == 'touch':
        return 'now'
    if state in ('file', 'hard', 'directory', 'link'):
        return 'preserve'
    return parameter
def execute_diff_peek(path):
    """Take a guess as to whether a file is a binary file"""
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        with open(b_path, 'rb') as f:
            head = f.read(8192)
    except Exception:
        # If we can't read the file, we're okay assuming it's text
        return False
    # A NUL byte in the first 8 KiB is treated as evidence of binary content.
    return b"\x00" in head
def ensure_absent(path):
    """Remove ``path`` (rmtree for directories, unlink otherwise).

    Returns a result dict with ``changed``/``state`` and, when something was
    removed, a ``diff`` describing the removal. Honors check mode.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    result = {}

    if prev_state != 'absent':
        diff = initial_diff(path, 'absent', prev_state)

        if not module.check_mode:
            if prev_state == 'directory':
                try:
                    shutil.rmtree(b_path, ignore_errors=False)
                except Exception as e:
                    module.fail_json(
                        msg=f"rmtree failed: {to_native(e)}"
                    )
            else:
                try:
                    os.unlink(b_path)
                except FileNotFoundError:
                    # Something else removed it first; absent is what we wanted.
                    pass
                except OSError as ex:
                    module.fail_json(
                        msg="Unlinking failed.",
                        path=path,
                        exception=ex,
                    )

        result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
    else:
        result.update({'path': path, 'changed': False, 'state': 'absent'})

    return result
def execute_touch(path, follow, timestamps):
    """Create ``path`` if missing and apply attributes/timestamps to it.

    Returns a result dict with ``changed`` and a ``diff``. In check mode a
    missing file short-circuits to ``changed=True`` without touching disk.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    changed = False
    result = {'dest': path}
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    # If the file did not already exist
    if prev_state == 'absent':
        # if we are in check mode and the file is absent
        # we can set the changed status to True and return
        if module.check_mode:
            result['changed'] = True
            return result
        # Create an empty file
        try:
            open(b_path, 'wb').close()
            changed = True
        except OSError as ex:
            module.fail_json(
                msg="Error, could not touch target.",
                path=path,
                exception=ex,
            )

    # Update the attributes on the file
    diff = initial_diff(path, 'touch', prev_state)
    file_args = module.load_file_common_arguments(module.params)
    try:
        changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
        changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    except SystemExit as e:
        if e.code:  # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
            # We take this to mean that fail_json() was called from
            # somewhere in basic.py
            if prev_state == 'absent':
                # If we just created the file we can safely remove it
                os.remove(b_path)
        raise

    result['changed'] = changed
    result['diff'] = diff
    return result
def ensure_file_attributes(path, follow, timestamps):
    """Assert ``path`` is a (hard-linked) regular file and apply attributes.

    With ``follow``, symlinks are resolved and the target is operated on.
    Fails when the path exists as anything other than a file/hardlink.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    if prev_state != 'file':
        if follow and prev_state == 'link':
            # follow symlink and operate on original
            b_path = os.path.realpath(b_path)
            path = to_native(b_path, errors='strict')
            prev_state = get_state(b_path)
            file_args['path'] = path

    if prev_state not in ('file', 'hard'):
        # file is not absent and any other state is a conflict
        module.fail_json(
            msg=f"file ({path}) is {prev_state}, cannot continue",
            path=path,
            state=prev_state
        )

    diff = initial_diff(path, 'file', prev_state)
    changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
    """Ensure ``path`` exists as a directory with the requested attributes.

    Creates any missing parent directories (like ``mkdir -p``), applying
    ownership/mode attributes to each newly walked component, and can
    recurse into an existing tree when ``recurse`` is True.

    :arg path: Directory path (native str).
    :arg follow: If True and ``path`` is a symlink, operate on its target.
    :arg recurse: If True, apply attributes to everything under the directory.
    :arg timestamps: Dict with modification/access time values and formats.
    :returns: Result dict with ``path``, ``changed`` and ``diff`` keys.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    # For followed symlinks, we need to operate on the target of the link
    if follow and prev_state == 'link':
        b_path = os.path.realpath(b_path)
        path = to_native(b_path, errors='strict')
        file_args['path'] = path
        prev_state = get_state(b_path)

    changed = False
    diff = initial_diff(path, 'directory', prev_state)

    if prev_state == 'absent':
        # Create directory and assign permissions to it
        if module.check_mode:
            # Check mode: report that a directory would be created, do nothing.
            return {'path': path, 'changed': True, 'diff': diff}
        curpath = ''

        try:
            # Split the path so we can apply filesystem attributes recursively
            # from the root (/) directory for absolute paths or the base path
            # of a relative path. We can then walk the appropriate directory
            # path to apply attributes.
            # Something like mkdir -p with mode applied to all of the newly created directories
            for dirname in path.strip('/').split('/'):
                curpath = '/'.join([curpath, dirname])
                # Remove leading slash if we're creating a relative path
                if not os.path.isabs(path):
                    curpath = curpath.lstrip('/')
                b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                if not os.path.exists(b_curpath):
                    try:
                        os.mkdir(b_curpath)
                        changed = True
                    except OSError as ex:
                        # Possibly something else created the dir since the os.path.exists
                        # check above. As long as it's a dir, we don't need to error out.
                        if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
                            raise
                tmp_file_args = file_args.copy()
                tmp_file_args['path'] = curpath
                changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
                # NOTE(review): timestamps are applied to the final target path
                # (file_args['path']) on every iteration, not to curpath — it
                # only exists on the last pass; presumably the helper tolerates
                # a missing path. Confirm against update_timestamp_for_file.
                changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
        except Exception as e:
            # Broad catch is deliberate: any failure while walking/creating the
            # tree is turned into a module failure naming the failing component.
            module.fail_json(
                msg=f"There was an issue creating {curpath} as requested: {to_native(e)}",
                path=path
            )

        return {'path': path, 'changed': changed, 'diff': diff}

    elif prev_state != 'directory':
        # We already know prev_state is not 'absent', therefore it exists in some form.
        module.fail_json(
            msg=f"{path} already exists as a {prev_state}",
            path=path
        )

    #
    # previous state == directory
    #

    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    if recurse:
        changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)

    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
    """Ensure ``path`` is a symbolic link pointing at ``src``.

    Retargets an existing link (atomically, via a temporary link plus
    rename) and, with ``force``, may replace an existing file or empty
    directory with a link.

    :arg path: Link path (native str).
    :arg src: Link target (may be relative to the link's directory);
        None means "update attributes of an existing link only".
    :arg follow: If True, attributes are applied to the link's target
        rather than the link itself.
    :arg force: If True, allow replacing an existing non-link path and
        creating links to nonexistent targets.
    :arg timestamps: Dict with modification/access time values and formats.
    :returns: Result dict with ``dest``, ``src``, ``changed`` and ``diff``.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # source is both the source of a symlink or an informational passing of the src for a template module
    # or copy module, even if this module never uses it, it is needed to key off some things
    if src is None:
        if follow and os.path.exists(b_path):
            # use the current target of the link as the source
            src = to_native(os.readlink(b_path), errors='strict')
            b_src = to_bytes(src, errors='surrogate_or_strict')

    # Determine the directory that a relative src should be resolved against.
    if not os.path.islink(b_path) and os.path.isdir(b_path):
        relpath = path
    else:
        b_relpath = os.path.dirname(b_path)
        relpath = to_native(b_relpath, errors='strict')

    # If src is None that means we are expecting to update an existing link.
    if src is None:
        absrc = None
    else:
        absrc = os.path.join(relpath, src)

    # NOTE(review): when absrc is None this converts the None object itself;
    # b_absrc is only consulted below under a "src is not None" guard, so the
    # value is never used in that case — confirm to_bytes tolerates None here.
    b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
    if not force and src is not None and not os.path.exists(b_absrc):
        module.fail_json(
            msg="src file does not exist, use 'force=yes' if you"
            f" really want to create the link: {absrc}",
            path=path,
            src=src
        )

    if prev_state == 'directory':
        if not force:
            module.fail_json(
                msg=f'refusing to convert from {prev_state} to symlink for {path}',
                path=path
            )
        elif os.listdir(b_path):
            # refuse to replace a directory that has files in it
            module.fail_json(
                msg=f'the directory {path} is not empty, refusing to convert it',
                path=path
            )
    elif prev_state in ('file', 'hard') and not force:
        module.fail_json(
            msg=f'refusing to convert from {prev_state} to symlink for {path}',
            path=path
        )

    diff = initial_diff(path, 'link', prev_state)
    changed = False

    if prev_state in ('hard', 'file', 'directory', 'absent'):
        if src is None:
            module.fail_json(
                msg='src is required for creating new symlinks',
            )
        changed = True
    elif prev_state == 'link':
        if src is not None:
            # Compare the current and requested targets to decide on a retarget.
            b_old_src = os.readlink(b_path)
            if b_old_src != b_src:
                diff['before']['src'] = to_native(b_old_src, errors='strict')
                diff['after']['src'] = src
                changed = True
    else:
        module.fail_json(
            msg='unexpected position reached',
            dest=path,
            src=src
        )

    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    # Directory was verified empty above; remove it before rename.
                    os.rmdir(b_path)
                os.symlink(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                # Clean up the temporary link before reporting failure.
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                module.fail_json(
                    msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
                    path=path
                )
        else:
            try:
                os.symlink(b_src, b_path)
            except OSError as e:
                module.fail_json(
                    msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
                    path=path
                )

    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}

    # Now that we might have created the symlink, get the arguments.
    # We need to do it now so we can properly follow the symlink if needed
    # because load_file_common_arguments sets 'path' according
    # the value of follow and the symlink existence.
    file_args = module.load_file_common_arguments(module.params)

    # Whenever we create a link to a nonexistent target we know that the nonexistent target
    # cannot have any permissions set on it. Skip setting those and emit a warning (the user
    # can set follow=False to remove the warning)
    if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
        module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
                    ' set to False to avoid this.')
    else:
        changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
        changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)

    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
    """Ensure ``path`` is a hard link to ``src``.

    Targets are compared by inode number; an existing link at ``path``
    is replaced atomically (temporary link plus rename) when it points
    elsewhere and ``force`` is set.

    :arg path: Link path (native str).
    :arg src: Existing file the link must point to; may be None only
        when ``path`` is already a hard link.
    :arg follow: If True and ``src`` is a symlink, link to its target.
    :arg force: If True, allow replacing a different file/link at ``path``.
    :arg timestamps: Dict with modification/access time values and formats.
    :returns: Result dict with ``dest``, ``src``, ``changed`` and ``diff``.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    # src is the source of a hardlink. We require it if we are creating a new hardlink.
    # We require path in the argument_spec so we know it is present at this point.
    if prev_state != 'hard' and src is None:
        module.fail_json(
            msg='src is required for creating new hardlinks'
        )

    # Even if the link already exists, if src was specified it needs to exist.
    # The inode number will be compared to ensure the link has the correct target.
    if src is not None and not os.path.exists(b_src):
        module.fail_json(
            msg='src does not exist',
            dest=path,
            src=src
        )

    diff = initial_diff(path, 'hard', prev_state)
    changed = False

    if prev_state == 'absent':
        changed = True
    elif prev_state == 'link':
        # A symlink occupies the destination; record the retarget in the diff.
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    elif prev_state == 'hard':
        # Same inode means the link is already correct; different inode is a
        # conflict unless force is set.
        if src is not None and os.stat(b_path).st_ino != os.stat(b_src).st_ino:
            changed = True
            if not force:
                module.fail_json(
                    msg='Cannot link, different hard link exists at destination',
                    dest=path,
                    src=src
                )
    elif prev_state == 'file':
        changed = True
        if not force:
            module.fail_json(
                msg=f'Cannot link, {prev_state} exists at destination',
                dest=path,
                src=src
            )
    elif prev_state == 'directory':
        changed = True
        if os.path.exists(b_path):
            if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
                return {'path': path, 'changed': False}
            elif not force:
                module.fail_json(
                    msg='Cannot link: different hard link exists at destination',
                    dest=path,
                    src=src
                )
    else:
        module.fail_json(
            msg='unexpected position reached',
            dest=path,
            src=src
        )

    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    # NOTE(review): os.unlink on a directory raises
                    # IsADirectoryError (an OSError that is not
                    # FileNotFoundError), which would surface as the
                    # "Error while replacing" failure below — confirm
                    # this branch is intended to succeed for directories.
                    if os.path.exists(b_path):
                        try:
                            os.unlink(b_path)
                        except FileNotFoundError:
                            pass
                os.link(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                # Clean up the temporary link before reporting failure.
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                module.fail_json(
                    msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
                    path=path
                )
        else:
            try:
                if follow and os.path.islink(b_src):
                    b_src = os.readlink(b_src)
                os.link(b_src, b_path)
            except OSError as e:
                module.fail_json(
                    msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
                    path=path
                )

    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}

    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)

    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def check_owner_exists(module, owner):
    """Warn if the requested owner is unknown to the system.

    ``owner`` may be a numeric uid or a user name.  A warning (never a
    failure) is emitted so a check-mode run can proceed when the user is
    going to be created earlier in the real play.

    :arg module: Object providing a ``warn(msg)`` method (AnsibleModule).
    :arg owner: Requested owner, as uid string/int or user name.
    """
    try:
        uid = int(owner)
    except ValueError:
        # Not numeric: resolve the value as a user name instead.
        try:
            getpwnam(owner).pw_uid
        except KeyError:
            module.warn('failed to look up user %s. Create user up to this point in real play' % owner)
    else:
        try:
            getpwuid(uid).pw_name
        except KeyError:
            module.warn('failed to look up user with uid %s. Create user up to this point in real play' % uid)
def check_group_exists(module, group):
    """Warn if the requested group is unknown to the system.

    ``group`` may be a numeric gid or a group name.  A warning (never a
    failure) is emitted so a check-mode run can proceed when the group is
    going to be created earlier in the real play.

    :arg module: Object providing a ``warn(msg)`` method (AnsibleModule).
    :arg group: Requested group, as gid string/int or group name.
    """
    try:
        gid = int(group)
    except ValueError:
        # Not numeric: resolve the value as a group name instead.
        try:
            getgrnam(group).gr_gid
        except KeyError:
            module.warn('failed to look up group %s. Create group up to this point in real play' % group)
    else:
        try:
            getgrgid(gid).gr_name
        except KeyError:
            module.warn('failed to look up group with gid %s. Create group up to this point in real play' % gid)
def main():
    """Module entry point: parse arguments and dispatch on ``state``.

    Builds the AnsibleModule (stored in the module-level ``module``
    global so the ensure_* helpers can reach it), normalizes timestamp
    parameters, handles the internal ``_diff_peek`` short-circuit used
    by action plugins, then delegates to the per-state handler and
    exits with its result dict.
    """
    global module

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
            path=dict(type='path', required=True, aliases=['dest', 'name']),
            _original_basename=dict(type='str'),  # Internal use only, for recursive ops
            recurse=dict(type='bool', default=False),
            force=dict(type='bool', default=False),  # Note: Should not be in file_common_args in future
            follow=dict(type='bool', default=True),  # Note: Different default than file_common_args
            _diff_peek=dict(type='bool'),  # Internal use only, for internal checks in the action plugins
            src=dict(type='path'),  # Note: Should not be in file_common_args in future
            modification_time=dict(type='str'),
            modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
            access_time=dict(type='str'),
            access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    try:
        additional_parameter_handling(module.params)
        params = module.params

        state = params['state']
        recurse = params['recurse']
        force = params['force']
        follow = params['follow']
        path = params['path']
        src = params['src']

        # In check mode, warn early about owner/group values that do not exist
        # yet (they may be created earlier in the real play).
        if module.check_mode and state != 'absent':
            file_args = module.load_file_common_arguments(module.params)
            if file_args['owner']:
                check_owner_exists(module, file_args['owner'])
            if file_args['group']:
                check_group_exists(module, file_args['group'])

        timestamps = {}
        timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
        timestamps['modification_time_format'] = params['modification_time_format']
        timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
        timestamps['access_time_format'] = params['access_time_format']

        # short-circuit for diff_peek
        if params['_diff_peek'] is not None:
            appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
            module.exit_json(path=path, changed=False, appears_binary=appears_binary)

        # NOTE(review): assumes additional_parameter_handling() always resolves
        # state to one of the handled values; otherwise 'result' below would be
        # unbound — confirm against that helper.
        if state == 'file':
            result = ensure_file_attributes(path, follow, timestamps)
        elif state == 'directory':
            result = ensure_directory(path, follow, recurse, timestamps)
        elif state == 'link':
            result = ensure_symlink(path, src, follow, force, timestamps)
        elif state == 'hard':
            result = ensure_hardlink(path, src, follow, force, timestamps)
        elif state == 'touch':
            result = execute_touch(path, follow, timestamps)
        elif state == 'absent':
            result = ensure_absent(path)
    except AnsibleModuleError as ex:
        module.fail_json(**ex.results)

    # Strip diff output unless the user requested --diff.
    if not module._diff:
        result.pop('diff', None)

    module.exit_json(**result)
# Entry point when Ansible executes this file as a module.
if __name__ == '__main__':
    main()
| ParameterError |
python | google__pytype | pytype/overlays/abc_overlay.py | {
"start": 1774,
"end": 2642
} | class ____(special_builtins.Property):
"""Implements the @abc.abstractproperty decorator."""
@classmethod
def make(cls, ctx, module):
return super().make_alias("abstractproperty", ctx, module)
def call(self, node, func, args, alias_map=None):
property_args = self._get_args(args)
for v in property_args.values():
for b in v.bindings:
f = b.data
# If this check fails, we will raise a 'property object is not callable'
# error down the line.
# TODO(mdemello): This is in line with what python does, but we could
# have a more precise error message that insisted f was a class method.
if isinstance(f, abstract.Function):
f.is_abstract = True
return node, special_builtins.PropertyInstance(
self.ctx, self.name, self, **property_args
).to_variable(node)
| AbstractProperty |
python | PyCQA__pylint | pylint/extensions/dict_init_mutate.py | {
"start": 597,
"end": 1782
} | class ____(BaseChecker):
name = "dict-init-mutate"
msgs = {
"C3401": (
"Declare all known key/values when initializing the dictionary.",
"dict-init-mutate",
"Dictionaries can be initialized with a single statement "
"using dictionary literal syntax.",
)
}
@only_required_for_messages("dict-init-mutate")
def visit_assign(self, node: nodes.Assign) -> None:
"""
Detect dictionary mutation immediately after initialization.
At this time, detecting nested mutation is not supported.
"""
match node:
case nodes.Assign(
targets=[nodes.AssignName(name=dict_name)], value=nodes.Dict()
):
pass
case _:
return
match node.next_sibling():
case nodes.Assign(
targets=[nodes.Subscript(value=nodes.Name(name=name))]
) if (name == dict_name):
self.add_message("dict-init-mutate", node=node, confidence=HIGH)
def register(linter: PyLinter) -> None:
linter.register_checker(DictInitMutateChecker(linter))
| DictInitMutateChecker |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/providers.py | {
"start": 10747,
"end": 10918
} | class ____(TypedDict):
type: InfoObservationType
title: str
content: str | dict[str, Any]
# TODO_DOCS: link to choice sequence explanation page
| _BackendInfoMsg |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 12016,
"end": 12362
} | class ____(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicPermModel.objects.all()
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
object_permissions_view = ObjectPermissionInstanceView.as_view()
| ObjectPermissionInstanceView |
python | tqdm__tqdm | tqdm/std.py | {
"start": 1638,
"end": 1985
} | class ____(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
def TRLock(*args, **kwargs):
"""threading RLock"""
try:
from threading import RLock
return RLock(*args, **kwargs)
except (ImportError, OSError): # pragma: no cover
pass
| TqdmMonitorWarning |
python | google__pytype | pytype/test_data/pytree.py | {
"start": 12011,
"end": 15257
} | class ____(Base):
"""Concrete implementation for leaf nodes.
The __str__ value is derived from the prefix and the value. The prefix is
any white space and comments before this item (e.g., Leaf(token.NEWLINE,
value="\n", prefix=" # comment").
The _eq method (see Base.__eq__) compares only type and value.
"""
# Default values for instance variables
_prefix = "" # Whitespace and comments preceding this token in the input
line = 0 # Line where this token starts in the input
column = 0 # Column where this token starts in the input
def __init__(self, type, value, context=None, prefix=None, fixers_applied=[]):
"""Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument (prefix, (line, column)). If the
prefix keyword argument is provided, it overrides the prefix derived
from the context. The prefix is the text that appears before the value
(e.g., blanks and comments).
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.line, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied = fixers_applied[:]
def __repr__(self):
"""Return a canonical string representation."""
return f"{self.__class__.__name__}({self.type_repr}, {self.value!r})"
def __unicode__(self):
"""Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + str(self.value)
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality (type and value)."""
return (self.type, self.value) == (other.type, other.value)
def clone(self):
"""Return a cloned (deep) copy of self."""
l = Leaf(
self.type,
self.value,
(self.prefix, (self.line, self.column)),
fixers_applied=self.fixers_applied,
)
try:
l.label = self.label[:]
except AttributeError:
pass # if label_nodes() hasn't been done, quietly do nothing
return l
def leaves(self):
yield self
def post_order(self):
"""Post-order iterator for the tree."""
yield self
def pre_order(self):
"""Pre-order iterator for the tree."""
yield self
def _prefix_getter(self):
"""The whitespace and comments preceding this token in the input."""
return self._prefix
def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
"""Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
# Note: context in the following is ignored by the constructor:
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
| Leaf |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_arc4.py | {
"start": 552,
"end": 1027
} | class ____:
test_rfc = generate_stream_encryption_test(
load_nist_vectors,
os.path.join("ciphers", "ARC4"),
[
"rfc-6229-40.txt",
"rfc-6229-56.txt",
"rfc-6229-64.txt",
"rfc-6229-80.txt",
"rfc-6229-128.txt",
"rfc-6229-192.txt",
"rfc-6229-256.txt",
"arc4.txt",
],
lambda key, **kwargs: algorithms.ARC4(binascii.unhexlify(key)),
)
| TestARC4 |
python | pytorch__pytorch | test/mobile/model_test/nn_ops.py | {
"start": 12635,
"end": 12885
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.shuffle = nn.ChannelShuffle(2)
def forward(self):
return len(
self.shuffle(torch.randn(1, 4, 2, 2)),
)
| NNShuffleModule |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 16481,
"end": 19800
} | class ____(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = [
"ID",
"content_role",
"content_type",
"title",
"value",
"href",
"action",
]
_element_name = "LINK"
def __init__(
self,
ID=None,
title=None,
value=None,
href=None,
action=None,
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get("content-role") or kwargs.get("content_role")
content_type = kwargs.get("content-type") or kwargs.get("content_type")
if "gref" in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
"LINK",
kwargs.keys(),
config,
pos,
["content-role", "content_role", "content-type", "content_type", "gref"],
)
@property
def content_role(self):
"""Defines the MIME role of the referenced object.
Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if (
content_role == "type" and not self._config.get("version_1_3_or_later")
) or content_role not in (None, "query", "hints", "doc", "location"):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault("links", [])
column.meta["links"].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
| Link |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 6955,
"end": 7300
} | class ____(TestNodeDataViewSetOps):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.G.nodes[3]["foo"] = "bar"
cls.nv = cls.G.nodes.data("foo", default=1)
def n_its(self, nodes):
return {(node, "bar" if node == 3 else 1) for node in nodes}
# Edges Data View
| TestNodeDataViewDefaultSetOps |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 70411,
"end": 71596
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=False,
num_mel_bins=20,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.num_mel_bins = num_mel_bins
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0)
config = self.get_config()
return config, input_values
def get_config(self):
return SpeechT5HifiGanConfig(
model_in_dim=self.num_mel_bins,
upsample_initial_channel=32,
)
def create_and_check_model(self, config, input_values):
model = SpeechT5HifiGan(config=config).to(torch_device).eval()
result = model(input_values)
self.parent.assertEqual(result.shape, (self.seq_length * 256,))
def prepare_config_and_inputs_for_common(self):
config, input_values = self.prepare_config_and_inputs()
inputs_dict = {"spectrogram": input_values}
return config, inputs_dict
@require_torch
| SpeechT5HifiGanTester |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 44756,
"end": 46521
} | class ____(_LPPoolNd):
r"""Applies a 1D power-average pooling over an input signal composed of several input planes.
On each window, the function computed is:
.. math::
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
- At p = :math:`\infty`, one gets Max Pooling
- At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
not defined. This implementation will set the gradient to zero in this case.
Args:
kernel_size: a single int, the size of the window
stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Note:
When :attr:`ceil_mode` is ``True``, sliding windows may go off-bounds if they start within the
left padding or the input. Sliding windows that would start in the right padded region are ignored.
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
.. math::
L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
Examples::
>>> # power-2 pool of window of length 3, with stride 2.
>>> m = nn.LPPool1d(2, 3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
"""
kernel_size: _size_1_t
stride: _size_1_t
def forward(self, input: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.lp_pool1d(
input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode
)
| LPPool1d |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 79429,
"end": 81222
} | class ____(AtomicExprNode):
# Imaginary number literal
#
# value string imaginary part (float value)
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, float(self.value))
def compile_time_value(self, denv):
return complex(0.0, float(self.value))
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = Builtin.complex_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
#-------------------------------------------------------------------
#
# Simple expressions
#
#-------------------------------------------------------------------
| ImagNode |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_common.py | {
"start": 8609,
"end": 8870
} | class ____(_captured.AnsibleCapturedError):
"""Template-external error raised by VaultExceptionMarker when an undecryptable variable is accessed."""
context = 'vault'
_default_message = "Attempt to use undecryptable variable."
| UndecryptableVaultError |
python | jazzband__django-model-utils | model_utils/managers.py | {
"start": 12503,
"end": 13098
} | class ____(Generic[ModelT]):
"""
QuerySet for SoftDeletableModel. Instead of removing instance sets
its ``is_removed`` field to True.
"""
def delete(self) -> tuple[int, dict[str, int]]:
"""
Soft delete objects from queryset (set their ``is_removed``
field to True)
"""
model: type[ModelT] = self.model # type: ignore[attr-defined]
number_of_deleted_objects = cast(QuerySet[ModelT], self).update(is_removed=True)
return number_of_deleted_objects, {model._meta.label: number_of_deleted_objects}
| SoftDeletableQuerySetMixin |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 1506,
"end": 1817
} | class ____(Protocol):
@curry
def __call__[A, B](
self, a: Callable[[A], B], b: Callable[[Callable[[A], B]], A]
) -> B: ...
# This should generate an error and not hang.
EvilProto1.__call__
# This should generate an error and not hang.
p: EvilProto1 = curry(lambda a, b: a(b(a)))
| EvilProto1 |
python | simonw__datasette | datasette/filters.py | {
"start": 8443,
"end": 9064
} | class ____(Filter):
key = "in"
display = "in"
def split_value(self, value):
if value.startswith("["):
return json.loads(value)
else:
return [v.strip() for v in value.split(",")]
def where_clause(self, table, column, value, param_counter):
values = self.split_value(value)
params = [f":p{param_counter + i}" for i in range(len(values))]
sql = f"{escape_sqlite(column)} in ({', '.join(params)})"
return sql, values
def human_clause(self, column, value):
return f"{column} in {json.dumps(self.split_value(value))}"
| InFilter |
python | pytorch__pytorch | test/jit/test_jit_utils.py | {
"start": 476,
"end": 3711
} | class ____(JitTestCase):
# Tests that POSITIONAL_OR_KEYWORD arguments are captured.
def test_get_callable_argument_names_positional_or_keyword(self):
def fn_positional_or_keyword_args_only(x, y):
return x + y
self.assertEqual(
["x", "y"],
torch._jit_internal.get_callable_argument_names(
fn_positional_or_keyword_args_only
),
)
# Tests that POSITIONAL_ONLY arguments are ignored.
def test_get_callable_argument_names_positional_only(self):
code = dedent(
"""
def fn_positional_only_arg(x, /, y):
return x + y
"""
)
fn_positional_only_arg = jit_utils._get_py3_code(code, "fn_positional_only_arg")
self.assertEqual(
["y"],
torch._jit_internal.get_callable_argument_names(fn_positional_only_arg),
)
# Tests that VAR_POSITIONAL arguments are ignored.
def test_get_callable_argument_names_var_positional(self):
# Tests that VAR_POSITIONAL arguments are ignored.
def fn_var_positional_arg(x, *arg):
return x + arg[0]
self.assertEqual(
["x"],
torch._jit_internal.get_callable_argument_names(fn_var_positional_arg),
)
# Tests that KEYWORD_ONLY arguments are ignored.
def test_get_callable_argument_names_keyword_only(self):
def fn_keyword_only_arg(x, *, y):
return x + y
self.assertEqual(
["x"], torch._jit_internal.get_callable_argument_names(fn_keyword_only_arg)
)
# Tests that VAR_KEYWORD arguments are ignored.
def test_get_callable_argument_names_var_keyword(self):
def fn_var_keyword_arg(**args):
return args["x"] + args["y"]
self.assertEqual(
[], torch._jit_internal.get_callable_argument_names(fn_var_keyword_arg)
)
# Tests that a function signature containing various different types of
# arguments are ignored.
def test_get_callable_argument_names_hybrid(self):
code = dedent(
"""
def fn_hybrid_args(x, /, y, *args, **kwargs):
return x + y + args[0] + kwargs['z']
"""
)
fn_hybrid_args = jit_utils._get_py3_code(code, "fn_hybrid_args")
self.assertEqual(
["y"], torch._jit_internal.get_callable_argument_names(fn_hybrid_args)
)
def test_checkscriptassertraisesregex(self):
def fn():
tup = (1, 2)
return tup[2]
self.checkScriptRaisesRegex(fn, (), Exception, "range", name="fn")
s = dedent(
"""
def fn():
tup = (1, 2)
return tup[2]
"""
)
self.checkScriptRaisesRegex(s, (), Exception, "range", name="fn")
def test_no_tracer_warn_context_manager(self):
torch._C._jit_set_tracer_state_warn(True)
with jit_utils.NoTracerWarnContextManager():
self.assertEqual(False, torch._C._jit_get_tracer_state_warn())
self.assertEqual(True, torch._C._jit_get_tracer_state_warn())
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestJitUtils |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/no_self_use.py | {
"start": 367,
"end": 703
} | class ____:
name = "Paris"
def __init__(self):
pass
def __cmp__(self, other):
print(24)
def __repr__(self):
return "Person"
def func(self):
...
def greeting_1(self):
print(f"Hello from {self.name} !")
@staticmethod
def greeting_2():
print("Hi!")
| Person |
python | celery__celery | t/unit/utils/test_threads.py | {
"start": 1492,
"end": 1778
} | class ____:
def test_stack(self):
x = _FastLocalStack()
x.push(['foo'])
x.push(['bar'])
assert x.top == ['bar']
assert len(x) == 2
x.pop()
assert x.top == ['foo']
x.pop()
assert x.top is None
| test_FastLocalStack |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_format.py | {
"start": 4030,
"end": 4659
} | class ____:
def __str__(self):
raise ValueError('broken')
def __repr__(self):
return 'BrokenToString()'
def test_to_string():
s = SchemaSerializer(core_schema.any_schema(serialization={'type': 'to-string'}))
assert s.to_python(123, mode='json') == '123'
assert s.to_python(None, mode='json') is None
uuid = UUID('ebcdab58-6eb8-46fb-a190-d07a33e9eac8')
assert s.to_python(uuid, mode='json') == str(uuid)
assert s.to_json(uuid) == b'"%s"' % str(uuid).encode('utf-8')
with pytest.raises(ValueError, match='broken'):
s.to_python(BrokenToString(), mode='json')
| BrokenToString |
python | django__django | tests/test_runner/tests.py | {
"start": 33486,
"end": 34150
} | class ____(unittest.TestCase):
def test_empty_default_database(self):
"""
An empty default database in settings does not raise an
ImproperlyConfigured error when running a unit test that does not use a
database.
"""
tested_connections = db.ConnectionHandler({"default": {}})
with mock.patch("django.db.connections", new=tested_connections):
connection = tested_connections[db.utils.DEFAULT_DB_ALIAS]
self.assertEqual(
connection.settings_dict["ENGINE"], "django.db.backends.dummy"
)
connections_support_transactions()
| EmptyDefaultDatabaseTest |
python | sympy__sympy | sympy/physics/vector/printing.py | {
"start": 3930,
"end": 11790
} | class ____(PrettyPrinter):
"""Pretty Printer for vectorialexpressions. """
def _print_Derivative(self, deriv):
from sympy.physics.vector.functions import dynamicsymbols
# XXX use U('PARTIAL DIFFERENTIAL') here ?
t = dynamicsymbols._t
dot_i = 0
syms = list(reversed(deriv.variables))
while len(syms) > 0:
if syms[-1] == t:
syms.pop()
dot_i += 1
else:
return super()._print_Derivative(deriv)
if not (isinstance(type(deriv.expr), UndefinedFunction) and
(deriv.expr.args == (t,))):
return super()._print_Derivative(deriv)
else:
pform = self._print_Function(deriv.expr)
# the following condition would happen with some sort of non-standard
# dynamic symbol I guess, so we'll just print the SymPy way
if len(pform.picture) > 1:
return super()._print_Derivative(deriv)
# There are only special symbols up to fourth-order derivatives
if dot_i >= 5:
return super()._print_Derivative(deriv)
# Deal with special symbols
dots = {0: "",
1: "\N{COMBINING DOT ABOVE}",
2: "\N{COMBINING DIAERESIS}",
3: "\N{COMBINING THREE DOTS ABOVE}",
4: "\N{COMBINING FOUR DOTS ABOVE}"}
d = pform.__dict__
# if unicode is false then calculate number of apostrophes needed and
# add to output
if not self._use_unicode:
apostrophes = ""
for i in range(0, dot_i):
apostrophes += "'"
d['picture'][0] += apostrophes + "(t)"
else:
d['picture'] = [center_accent(d['picture'][0], dots[dot_i])]
return pform
def _print_Function(self, e):
from sympy.physics.vector.functions import dynamicsymbols
t = dynamicsymbols._t
# XXX works only for applied functions
func = e.func
args = e.args
func_name = func.__name__
pform = self._print_Symbol(Symbol(func_name))
# If this function is an Undefined function of t, it is probably a
# dynamic symbol, so we'll skip the (t). The rest of the code is
# identical to the normal PrettyPrinter code
if not (isinstance(func, UndefinedFunction) and (args == (t,))):
return super()._print_Function(e)
return pform
def vprint(expr, **settings):
r"""Function for printing of expressions generated in the
sympy.physics vector package.
Extends SymPy's StrPrinter, takes the same setting accepted by SymPy's
:func:`~.sstr`, and is equivalent to ``print(sstr(foo))``.
Parameters
==========
expr : valid SymPy object
SymPy expression to print.
settings : args
Same as the settings accepted by SymPy's sstr().
Examples
========
>>> from sympy.physics.vector import vprint, dynamicsymbols
>>> u1 = dynamicsymbols('u1')
>>> print(u1)
u1(t)
>>> vprint(u1)
u1
"""
outstr = vsprint(expr, **settings)
import builtins
if (outstr != 'None'):
builtins._ = outstr
print(outstr)
def vsstrrepr(expr, **settings):
"""Function for displaying expression representation's with vector
printing enabled.
Parameters
==========
expr : valid SymPy object
SymPy expression to print.
settings : args
Same as the settings accepted by SymPy's sstrrepr().
"""
p = VectorStrReprPrinter(settings)
return p.doprint(expr)
def vsprint(expr, **settings):
r"""Function for displaying expressions generated in the
sympy.physics vector package.
Returns the output of vprint() as a string.
Parameters
==========
expr : valid SymPy object
SymPy expression to print
settings : args
Same as the settings accepted by SymPy's sstr().
Examples
========
>>> from sympy.physics.vector import vsprint, dynamicsymbols
>>> u1, u2 = dynamicsymbols('u1 u2')
>>> u2d = dynamicsymbols('u2', level=1)
>>> print("%s = %s" % (u1, u2 + u2d))
u1(t) = u2(t) + Derivative(u2(t), t)
>>> print("%s = %s" % (vsprint(u1), vsprint(u2 + u2d)))
u1 = u2 + u2'
"""
string_printer = VectorStrPrinter(settings)
return string_printer.doprint(expr)
def vpprint(expr, **settings):
r"""Function for pretty printing of expressions generated in the
sympy.physics vector package.
Mainly used for expressions not inside a vector; the output of running
scripts and generating equations of motion. Takes the same options as
SymPy's :func:`~.pretty_print`; see that function for more information.
Parameters
==========
expr : valid SymPy object
SymPy expression to pretty print
settings : args
Same as those accepted by SymPy's pretty_print.
"""
pp = VectorPrettyPrinter(settings)
# Note that this is copied from sympy.printing.pretty.pretty_print:
# XXX: this is an ugly hack, but at least it works
use_unicode = pp._settings['use_unicode']
from sympy.printing.pretty.pretty_symbology import pretty_use_unicode
uflag = pretty_use_unicode(use_unicode)
try:
return pp.doprint(expr)
finally:
pretty_use_unicode(uflag)
def vlatex(expr, **settings):
r"""Function for printing latex representation of sympy.physics.vector
objects.
For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the
same options as SymPy's :func:`~.latex`; see that function for more
information;
Parameters
==========
expr : valid SymPy object
SymPy expression to represent in LaTeX form
settings : args
Same as latex()
Examples
========
>>> from sympy.physics.vector import vlatex, ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q1, q2 = dynamicsymbols('q1 q2')
>>> q1d, q2d = dynamicsymbols('q1 q2', 1)
>>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)
>>> vlatex(N.x + N.y)
'\\mathbf{\\hat{n}_x} + \\mathbf{\\hat{n}_y}'
>>> vlatex(q1 + q2)
'q_{1} + q_{2}'
>>> vlatex(q1d)
'\\dot{q}_{1}'
>>> vlatex(q1 * q2d)
'q_{1} \\dot{q}_{2}'
>>> vlatex(q1dd * q1 / q1d)
'\\frac{q_{1} \\ddot{q}_{1}}{\\dot{q}_{1}}'
"""
latex_printer = VectorLatexPrinter(settings)
return latex_printer.doprint(expr)
def init_vprinting(**kwargs):
"""Initializes time derivative printing for all SymPy objects, i.e. any
functions of time will be displayed in a more compact notation. The main
benefit of this is for printing of time derivatives; instead of
displaying as ``Derivative(f(t),t)``, it will display ``f'``. This is
only actually needed for when derivatives are present and are not in a
physics.vector.Vector or physics.vector.Dyadic object. This function is a
light wrapper to :func:`~.init_printing`. Any keyword
arguments for it are valid here.
{0}
Examples
========
>>> from sympy import Function, symbols
>>> t, x = symbols('t, x')
>>> omega = Function('omega')
>>> omega(x).diff()
Derivative(omega(x), x)
>>> omega(t).diff()
Derivative(omega(t), t)
Now use the string printer:
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> omega(x).diff()
Derivative(omega(x), x)
>>> omega(t).diff()
omega'
"""
kwargs['str_printer'] = vsstrrepr
kwargs['pretty_printer'] = vpprint
kwargs['latex_printer'] = vlatex
init_printing(**kwargs)
params = init_printing.__doc__.split('Examples\n ========')[0] # type: ignore
init_vprinting.__doc__ = init_vprinting.__doc__.format(params) # type: ignore
| VectorPrettyPrinter |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-netmind/llama_index/embeddings/netmind/base.py | {
"start": 216,
"end": 3514
} | class ____(BaseEmbedding):
api_base: str = Field(
default="https://api.netmind.ai/inference-api/openai/v1",
description="The base URL for the Netmind API.",
)
api_key: str = Field(
default="",
description="The API key for the Netmind API. If not set, will attempt to use the NETMIND_API_KEY environment variable.",
)
timeout: float = Field(
default=120, description="The timeout for the API request in seconds.", ge=0
)
max_retries: int = Field(
default=3,
description="The maximum number of retries for the API request.",
ge=0,
)
def __init__(
self,
model_name: str,
timeout: Optional[float] = 120,
max_retries: Optional[int] = 3,
api_key: Optional[str] = None,
api_base: str = "https://api.netmind.ai/inference-api/openai/v1",
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NETMIND_API_KEY", None)
super().__init__(
model_name=model_name,
api_key=api_key,
api_base=api_base,
**kwargs,
)
self._client = OpenAI(
api_key=api_key,
base_url=self.api_base,
timeout=timeout,
max_retries=max_retries,
)
self._aclient = AsyncOpenAI(
api_key=api_key,
base_url=self.api_base,
timeout=timeout,
max_retries=max_retries,
)
def _get_text_embedding(self, text: str) -> Embedding:
"""Get text embedding."""
return (
self._client.embeddings.create(
input=[text],
model=self.model_name,
)
.data[0]
.embedding
)
def _get_query_embedding(self, query: str) -> Embedding:
"""Get query embedding."""
return (
self._client.embeddings.create(
input=[query],
model=self.model_name,
)
.data[0]
.embedding
)
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""Get text embeddings."""
data = self._client.embeddings.create(
input=texts,
model=self.model_name,
).data
return [d.embedding for d in data]
async def _aget_text_embedding(self, text: str) -> Embedding:
"""Async get text embedding."""
return (
(await self._aclient.embeddings.create(input=[text], model=self.model_name))
.data[0]
.embedding
)
async def _aget_query_embedding(self, query: str) -> Embedding:
"""Async get query embedding."""
return (
(
await self._aclient.embeddings.create(
input=[query], model=self.model_name
)
)
.data[0]
.embedding
)
async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""Async get text embeddings."""
data = (
await self._aclient.embeddings.create(
input=texts,
model=self.model_name,
)
).data
return [d.embedding for d in data]
| NetmindEmbedding |
python | pypa__warehouse | warehouse/subscriptions/services.py | {
"start": 667,
"end": 11520
} | class ____:
def __init__(self, api, publishable_key, webhook_secret, domain):
self.api = api
self.publishable_key = publishable_key
self.webhook_secret = webhook_secret
self.domain = domain
@classmethod
def create_service(cls, context, request):
"""
Create appropriate billing service based on environment
"""
raise NotImplementedError
def get_checkout_session(self, session_id, **kwargs):
"""
Fetch the Checkout Session to based on the session_id passed to the success page
"""
checkout_session = self.api.checkout.Session.retrieve(
session_id,
expand=["customer", "line_items", "subscription"],
)
return checkout_session
def get_customer(self, subscription_id):
"""
Fetch the Customer resource attached to the Subscription
"""
subscription = self.api.Subscription.retrieve(
subscription_id,
expand=["customer"],
)
return subscription.customer
def create_customer(self, name, description):
"""
Create the Customer resource via Billing API with the given name and description
"""
return self.api.Customer.create(
name=name,
description=description[:300],
metadata={"billing_service": "pypi", "domain": self.domain},
)
def update_customer(self, customer_id, name, description):
return self.api.Customer.modify(
customer_id,
name=name,
description=description[:300],
)
def create_checkout_session(self, customer_id, price_ids, success_url, cancel_url):
"""
# Create new Checkout Session for the order
# For full details see https://stripe.com/docs/api/checkout/sessions/create
"""
checkout_session = self.api.checkout.Session.create(
customer=customer_id,
success_url=success_url,
cancel_url=cancel_url,
mode="subscription",
line_items=[{"price": price_id} for price_id in price_ids],
metadata={"billing_service": "pypi", "domain": self.domain},
# Uncomment `automatic_tax` to calculate tax automatically.
# Requires active tax settings on Stripe Dashboard.
# https://dashboard.stripe.com/settings/tax/activate
# automatic_tax={"enabled": True},
)
return checkout_session
def create_portal_session(self, customer_id, return_url):
"""
Return customer portal session to allow customer to managing their subscription
"""
portal_session = self.api.billing_portal.Session.create(
customer=customer_id,
return_url=return_url,
)
return portal_session
# See Stripe webhook documentation:
# https://stripe.com/docs/api/webhook_endpoints/create#create_webhook_endpoint-enabled_events
# https://stripe.com/docs/webhooks/quickstart
def webhook_received(self, payload, sig_header):
"""
Return parsed webhook event from Stripe
"""
return stripe.Webhook.construct_event(payload, sig_header, self.webhook_secret)
def create_or_update_product(self, name, description, tax_code, unit_label):
"""
Create product resource via Billing API, or update an active
product resource with the same name
"""
# Search for active product with the given name.
# (a) Search exact or substring match for name as supported by Stripe API.
# https://stripe.com/docs/search#query-fields-for-products
product_search = self.search_products(f'active:"true" name:"{name}"')
# (b) Make sure name is an exact match.
products = [
product for product in product_search["data"] if product["name"] == name
]
if products:
product = max(products, key=lambda p: p["created"])
return self.update_product(
product["id"], name, description, tax_code, unit_label
)
else:
return self.create_product(name, description, tax_code, unit_label)
def create_product(self, name, description, tax_code, unit_label):
"""
Create and return a product resource via Billing API
"""
return self.api.Product.create(
name=name,
description=description,
tax_code=tax_code,
unit_label=unit_label,
metadata={"billing_service": "pypi", "domain": self.domain},
)
def retrieve_product(self, product_id):
"""
Get a product resource by id via Billing API
"""
return self.api.Product.retrieve(product_id)
def update_product(self, product_id, name, description, tax_code, unit_label):
"""
Update a product resource via Billing API
only allowing update of those attributes we use
return the updated product
"""
return self.api.Product.modify(
product_id,
name=name,
description=description,
tax_code=tax_code,
unit_label=unit_label,
)
def list_all_products(self, limit=10):
"""
Get list of all price resources via Billing API
Limit can range between 1 and 100, default is 10
"""
return self.api.Product.list(limit=limit)
def delete_product(self, product_id):
"""
Delete a product resource via Billing API
"""
return self.api.Product.delete(product_id)
def search_products(self, query, limit=10):
"""
Search for product resources via Billing API
example: query="active:'true'"
"""
return self.api.Product.search(query=query, limit=limit)
def sync_product(self, subscription_product):
"""
Synchronize a product resource via Billing API with a
subscription product from the database.
"""
product = self.create_or_update_product(
name=subscription_product.product_name,
description=subscription_product.description,
# See Stripe docs for tax codes. https://stripe.com/docs/tax/tax-categories
tax_code=subscription_product.tax_code,
unit_label="user",
)
subscription_product.product_id = product["id"]
def create_or_update_price(self, unit_amount, currency, product_id, tax_behavior):
"""
Create price resource via Billing API, or update an active price
resource with the same product and currency
"""
# Search for active price that match all non-updatable fields.
# (a) Use query fields supported by Stripe API.
# https://stripe.com/docs/search#query-fields-for-prices
price_search = self.search_prices(
f'active:"true" product:"{product_id}" currency:"{currency}"'
)
# (b) Filter for other fields not supported by Stripe API.
prices = [p for p in price_search["data"] if p["unit_amount"] == unit_amount]
# Create new price if no match found.
if not prices:
return self.create_price(
unit_amount,
currency,
product_id,
tax_behavior,
)
# Update most recent matching price and archive other matching prices.
# https://stripe.com/docs/api/prices/update
[*others, price] = sorted(prices, key=lambda p: p["created"])
for other in others:
self.update_price(other["id"], active=False)
return self.update_price(price["id"], tax_behavior=tax_behavior)
def create_price(self, unit_amount, currency, product_id, tax_behavior):
"""
Create and return a price resource via Billing API
"""
return self.api.Price.create(
unit_amount=unit_amount,
currency=currency,
recurring={
# Hardcode 1 month. Different interval does not make sense with metered.
"interval": "month",
# Set "metered" and "max" to enable Stripe usage records.
# https://stripe.com/docs/products-prices/pricing-models#aggregate-metered-usage
"usage_type": "metered",
"aggregate_usage": "max",
},
product=product_id,
tax_behavior=tax_behavior,
metadata={"billing_service": "pypi", "domain": self.domain},
)
def retrieve_price(self, price_id):
"""
Get a price resource via Billing API
"""
return self.api.Price.retrieve(price_id)
def update_price(self, price_id, **parameters):
"""
Update a price resource by id via Billing API
only allowing update of those attributes we use
return the updated price
"""
return self.api.Price.modify(price_id, **parameters)
def list_all_prices(self, limit=10):
"""
Get list of all price resources via Billing API
Limit can range between 1 and 100, default is 10
"""
return self.api.Price.list(limit=limit)
def search_prices(self, query, limit=10):
"""
Search for price resources via Billing API
example: query="active:'true'"
"""
return self.api.Price.search(query=query, limit=limit)
def sync_price(self, subscription_price):
"""
Synchronize a price resource via Billing API with a
subscription price from the database.
"""
price = self.create_or_update_price(
unit_amount=subscription_price.unit_amount,
currency=subscription_price.currency,
product_id=subscription_price.subscription_product.product_id,
tax_behavior=subscription_price.tax_behavior,
)
subscription_price.price_id = price["id"]
def cancel_subscription(self, subscription_id):
"""
Cancels a customer’s subscription immediately.
The customer will not be charged again for the subscription.
"""
return self.api.Subscription.delete(subscription_id)
def create_or_update_usage_record(
self, subscription_item_id, organization_member_count
):
"""
Creates a usage record via Billing API
for a specified subscription item and date with default=now,
and fills it with a quantity=number of members in the org.
"""
return self.api.SubscriptionItem.create_usage_record(
subscription_item_id,
action="set",
quantity=organization_member_count,
)
@implementer(IBillingService)
| GenericBillingService |
python | huggingface__transformers | src/transformers/models/sew/modeling_sew.py | {
"start": 20522,
"end": 28863
} | class ____(PreTrainedModel):
config: SEWConfig
base_model_prefix = "sew"
main_input_name = "input_values"
input_modalities = "audio"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = False # needs a proper look into the mask creation
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, SEWPositionalConvEmbedding):
init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
init.constant_(module.conv.bias, 0)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv1d):
if is_deepspeed_zero3_enabled():
import deepspeed
if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
init.kaiming_normal_(module.weight)
else:
with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
init.kaiming_normal_(module.weight)
else:
init.kaiming_normal_(module.weight)
if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
init.zeros_(module.bias)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations makes sure that all values before the output lengths idxs are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
def _compute_mask_indices(
shape: tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.detach().sum(-1).tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
@auto_docstring
| SEWPreTrainedModel |
python | doocs__leetcode | solution/2100-2199/2186.Minimum Number of Steps to Make Two Strings Anagram II/Solution.py | {
"start": 0,
"end": 181
} | class ____:
def minSteps(self, s: str, t: str) -> int:
cnt = Counter(s)
for c in t:
cnt[c] -= 1
return sum(abs(v) for v in cnt.values())
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 6643,
"end": 10859
} | class ____(abc.ABC):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
"""
Parameters
----------
default_font_prop : `~.font_manager.FontProperties`
The default non-math font, or the base font for Unicode (generic)
font rendering.
load_glyph_flags : `.ft2font.LoadFlags`
Flags passed to the glyph loader (e.g. ``FT_Load_Glyph`` and
``FT_Load_Char`` for FreeType-based fonts).
"""
self.default_font_prop = default_font_prop
self.load_glyph_flags = load_glyph_flags
def get_kern(self, font1: str, fontclass1: str, sym1: str, fontsize1: float,
font2: str, fontclass2: str, sym2: str, fontsize2: float,
dpi: float) -> float:
"""
Get the kerning distance for font between *sym1* and *sym2*.
See `~.Fonts.get_metrics` for a detailed description of the parameters.
"""
return 0.
def _get_font(self, font: str) -> FT2Font:
raise NotImplementedError
def _get_info(self, font: str, font_class: str, sym: str, fontsize: float,
dpi: float) -> FontInfo:
raise NotImplementedError
def get_metrics(self, font: str, font_class: str, sym: str, fontsize: float,
dpi: float) -> FontMetrics:
r"""
Parameters
----------
font : str
One of the TeX font names: "tt", "it", "rm", "cal", "sf", "bf",
"default", "regular", "bb", "frak", "scr". "default" and "regular"
are synonyms and use the non-math font.
font_class : str
One of the TeX font names (as for *font*), but **not** "bb",
"frak", or "scr". This is used to combine two font classes. The
only supported combination currently is ``get_metrics("frak", "bf",
...)``.
sym : str
A symbol in raw TeX form, e.g., "1", "x", or "\sigma".
fontsize : float
Font size in points.
dpi : float
Rendering dots-per-inch.
Returns
-------
FontMetrics
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def render_glyph(self, output: Output, ox: float, oy: float, font: str,
font_class: str, sym: str, fontsize: float, dpi: float) -> None:
"""
At position (*ox*, *oy*), draw the glyph specified by the remaining
parameters (see `get_metrics` for their detailed description).
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
output.glyphs.append((ox, oy, info))
def render_rect_filled(self, output: Output,
x1: float, y1: float, x2: float, y2: float) -> None:
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
output.rects.append((x1, y1, x2, y2))
def get_xheight(self, font: str, fontsize: float, dpi: float) -> float:
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font: str, fontsize: float, dpi: float) -> float:
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_sized_alternatives_for_symbol(self, fontname: str,
sym: str) -> list[tuple[str, str]]:
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
| Fonts |
python | doocs__leetcode | solution/2400-2499/2406.Divide Intervals Into Minimum Number of Groups/Solution.py | {
"start": 0,
"end": 251
} | class ____:
def minGroups(self, intervals: List[List[int]]) -> int:
q = []
for left, right in sorted(intervals):
if q and q[0] < left:
heappop(q)
heappush(q, right)
return len(q)
| Solution |
python | walkccc__LeetCode | solutions/2583. Kth Largest Sum in a Binary Tree/2583.py | {
"start": 0,
"end": 470
} | class ____:
def kthLargestLevelSum(self, root: TreeNode | None, k: int) -> int:
levelSums = []
def dfs(root: TreeNode | None, level: int) -> None:
if not root:
return
if len(levelSums) == level:
levelSums.append(0)
levelSums[level] += root.val
dfs(root.left, level + 1)
dfs(root.right, level + 1)
dfs(root, 0)
if len(levelSums) < k:
return -1
return sorted(levelSums, reverse=True)[k - 1]
| Solution |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_file_urls.py | {
"start": 458,
"end": 618
} | class ____(GQLResult):
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[ArtifactFileUrlsArtifactFilesEdges]
| ArtifactFileUrlsArtifactFiles |
python | django__django | tests/admin_views/admin.py | {
"start": 9738,
"end": 9827
} | class ____(admin.ModelAdmin):
inlines = (FooAccountAdmin, BarAccountAdmin)
| PersonaAdmin |
python | imageio__imageio | imageio/plugins/_freeimage.py | {
"start": 10632,
"end": 10888
} | class ____(object):
FIMD_COMMENTS = 0
FIMD_EXIF_MAIN = 1
FIMD_EXIF_EXIF = 2
FIMD_EXIF_GPS = 3
FIMD_EXIF_MAKERNOTE = 4
FIMD_EXIF_INTEROP = 5
FIMD_IPTC = 6
FIMD_XMP = 7
FIMD_GEOTIFF = 8
FIMD_ANIMATION = 9
| METADATA_MODELS |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/test_given_models.py | {
"start": 5588,
"end": 5766
} | class ____(TransactionTestCase):
def test_can_get_examples(self):
for _ in range(200):
check_can_generate_examples(from_model(Company))
| TestsNeedingRollback |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1506053,
"end": 1506787
} | class ____(Transform):
"""
AggregateTransform schema wrapper.
Parameters
----------
aggregate : Sequence[dict, :class:`AggregatedFieldDef`]
Array of objects that define fields to aggregate.
groupby : Sequence[str, :class:`FieldName`]
The data fields to group by. If not specified, a single group containing all data
objects will be used.
"""
_schema = {"$ref": "#/definitions/AggregateTransform"}
def __init__(
self,
aggregate: Optional[Sequence[SchemaBase | Map]] = Undefined,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
**kwds,
):
super().__init__(aggregate=aggregate, groupby=groupby, **kwds)
| AggregateTransform |
python | apache__airflow | dev/breeze/src/airflow_breeze/commands/developer_commands.py | {
"start": 6975,
"end": 42047
} | class ____(threading.Thread):
def __init__(self, max_time: int):
super().__init__(daemon=True)
self.max_time = max_time
def run(self):
get_console().print(f"[info]Setting timer to fail after {self.max_time} s.")
sleep(self.max_time)
get_console().print(f"[error]The command took longer than {self.max_time} s. Failing!")
os.killpg(os.getpgid(0), SIGTERM)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Make sure that whatever you add here as an option is also
# Added in the "main" command in breeze.py. The min command above
# Is used for a shorthand of shell and except the extra
# Args it should have the same parameters.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
option_celery_broker = click.option(
"--celery-broker",
type=click.Choice(ALLOWED_CELERY_BROKERS, case_sensitive=False),
help="Specify the celery message broker",
default=DEFAULT_CELERY_BROKER,
show_default=True,
)
option_celery_flower = click.option("--celery-flower", help="Start celery flower", is_flag=True)
option_executor_shell = click.option(
"--executor",
type=click.Choice(ALLOWED_EXECUTORS, case_sensitive=False),
help="Specify the executor to use with shell command.",
default=DEFAULT_ALLOWED_EXECUTOR,
show_default=True,
)
option_force_build = click.option(
"--force-build", help="Force image build no matter if it is determined as needed.", is_flag=True
)
option_include_mypy_volume = click.option(
"--include-mypy-volume",
help="Whether to include mounting of the mypy volume (useful for debugging mypy).",
is_flag=True,
envvar="INCLUDE_MYPY_VOLUME",
)
option_restart = click.option(
"--restart",
"--remove-orphans",
help="Restart all containers before entering shell (also removes orphan containers).",
is_flag=True,
envvar="RESTART",
)
option_skip_environment_initialization = click.option(
"--skip-environment-initialization",
help="Skip running breeze entrypoint initialization - no user output, no db checks.",
is_flag=True,
envvar="SKIP_ENVIRONMENT_INITIALIZATION",
)
option_skip_image_upgrade_check = click.option(
"--skip-image-upgrade-check",
help="Skip checking if the CI image is up to date.",
is_flag=True,
envvar="SKIP_IMAGE_UPGRADE_CHECK",
)
option_warn_image_upgrade_needed = click.option(
"--warn-image-upgrade-needed",
help="Warn when image upgrade is needed even if --skip-upgrade-check is used.",
is_flag=True,
envvar="WARN_IMAGE_UPGRADE_NEEDED",
)
option_install_airflow_python_client = click.option(
"--install-airflow-python-client",
is_flag=True,
help="Install airflow python client packages (--distribution-format determines type) from 'dist' folder "
"when entering breeze.",
envvar="INSTALL_AIRFLOW_PYTHON_CLIENT",
)
option_start_api_server_with_examples = click.option(
"--start-api-server-with-examples",
is_flag=True,
help="Start minimal airflow api-server with examples (for testing purposes) when entering breeze.",
envvar="START_API_SERVER_WITH_EXAMPLES",
)
option_load_example_dags = click.option(
"-e",
"--load-example-dags",
help="Enable configuration to load example DAGs when starting Airflow.",
is_flag=True,
envvar="LOAD_EXAMPLES",
)
option_load_default_connections = click.option(
"-c",
"--load-default-connections",
help="Enable configuration to load default connections when starting Airflow.",
is_flag=True,
envvar="LOAD_DEFAULT_CONNECTIONS",
)
@main.command()
@click.argument("extra-args", nargs=-1, type=click.UNPROCESSED)
@click.option("--quiet", is_flag=True, envvar="QUIET", help="Suppress initialization output when starting.")
@option_tty
@click.option(
"--verbose-commands",
help="Show details of commands executed.",
is_flag=True,
envvar="VERBOSE_COMMANDS",
)
@option_install_airflow_python_client
@option_start_api_server_with_examples
@option_airflow_constraints_location
@option_airflow_constraints_mode_ci
@option_airflow_constraints_reference
@option_airflow_extras
@option_answer
@option_auth_manager
@option_backend
@option_builder
@option_celery_broker
@option_celery_flower
@option_clean_airflow_installation
@option_db_reset
@option_docker_host
@option_downgrade_sqlalchemy
@option_downgrade_pendulum
@option_dry_run
@option_executor_shell
@option_excluded_providers
@option_force_build
@option_force_lowest_dependencies
@option_forward_credentials
@option_github_repository
@option_include_mypy_volume
@option_install_airflow_with_constraints_default_true
@option_install_selected_providers
@option_installation_distribution_format
@option_load_example_dags
@option_load_default_connections
@option_all_integration
@option_keep_env_variables
@option_max_time
@option_mount_sources
@option_mysql_version
@option_no_db_cleanup
@option_platform_single
@option_postgres_version
@option_project_name
@option_providers_constraints_location
@option_providers_constraints_mode_ci
@option_providers_constraints_reference
@option_providers_skip_constraints
@option_python
@option_restart
@option_run_db_tests_only
@option_skip_db_tests
@option_skip_environment_initialization
@option_skip_image_upgrade_check
@option_test_type
@option_warn_image_upgrade_needed
@option_standalone_dag_processor
@option_upgrade_boto
@option_upgrade_sqlalchemy
@option_use_airflow_version
@option_allow_pre_releases
@option_use_distributions_from_dist
@option_use_uv
@option_uv_http_timeout
@option_verbose
def shell(
airflow_constraints_location: str,
airflow_constraints_mode: str,
airflow_constraints_reference: str,
airflow_extras: str,
auth_manager: str,
backend: str,
builder: str,
celery_broker: str,
celery_flower: bool,
clean_airflow_installation: bool,
db_reset: bool,
downgrade_sqlalchemy: bool,
downgrade_pendulum: bool,
docker_host: str | None,
executor: str,
extra_args: tuple,
excluded_providers: str,
force_build: bool,
force_lowest_dependencies: bool,
forward_credentials: bool,
github_repository: str,
include_mypy_volume: bool,
install_selected_providers: str,
install_airflow_with_constraints: bool,
install_airflow_python_client: bool,
integration: tuple[str, ...],
keep_env_variables: bool,
load_example_dags: bool,
load_default_connections: bool,
max_time: int | None,
mount_sources: str,
mysql_version: str,
no_db_cleanup: bool,
distribution_format: str,
platform: str | None,
postgres_version: str,
project_name: str,
providers_constraints_location: str,
providers_constraints_mode: str,
providers_constraints_reference: str,
providers_skip_constraints: bool,
python: str,
quiet: bool,
restart: bool,
run_db_tests_only: bool,
skip_environment_initialization: bool,
skip_db_tests: bool,
skip_image_upgrade_check: bool,
standalone_dag_processor: bool,
start_api_server_with_examples: bool,
test_type: str | None,
tty: str,
upgrade_boto: bool,
upgrade_sqlalchemy: bool,
use_airflow_version: str | None,
allow_pre_releases: bool,
use_distributions_from_dist: bool,
use_uv: bool,
uv_http_timeout: int,
verbose_commands: bool,
warn_image_upgrade_needed: bool,
):
"""Enter breeze environment. this is the default command use when no other is selected."""
if get_verbose() or get_dry_run() and not quiet:
get_console().print("\n[success]Welcome to breeze.py[/]\n")
get_console().print(f"\n[success]Root of Airflow Sources = {AIRFLOW_ROOT_PATH}[/]\n")
if max_time:
TimerThread(max_time=max_time).start()
set_forced_answer("yes")
airflow_constraints_reference = _determine_constraint_branch_used(
airflow_constraints_reference, use_airflow_version
)
platform = get_normalized_platform(platform)
shell_params = ShellParams(
airflow_constraints_location=airflow_constraints_location,
airflow_constraints_mode=airflow_constraints_mode,
airflow_constraints_reference=airflow_constraints_reference,
airflow_extras=airflow_extras,
allow_pre_releases=allow_pre_releases,
auth_manager=auth_manager,
backend=backend,
builder=builder,
celery_broker=celery_broker,
celery_flower=celery_flower,
clean_airflow_installation=clean_airflow_installation,
db_reset=db_reset,
downgrade_sqlalchemy=downgrade_sqlalchemy,
downgrade_pendulum=downgrade_pendulum,
docker_host=docker_host,
excluded_providers=excluded_providers,
executor=executor,
extra_args=extra_args if not max_time else ["exit"],
force_build=force_build,
force_lowest_dependencies=force_lowest_dependencies,
forward_credentials=forward_credentials,
github_repository=github_repository,
include_mypy_volume=include_mypy_volume,
install_airflow_with_constraints=install_airflow_with_constraints,
install_airflow_python_client=install_airflow_python_client,
install_selected_providers=install_selected_providers,
integration=integration,
keep_env_variables=keep_env_variables,
load_example_dags=load_example_dags,
load_default_connections=load_default_connections,
mount_sources=mount_sources,
mysql_version=mysql_version,
no_db_cleanup=no_db_cleanup,
distribution_format=distribution_format,
platform=platform,
postgres_version=postgres_version,
project_name=project_name,
providers_constraints_location=providers_constraints_location,
providers_constraints_mode=providers_constraints_mode,
providers_constraints_reference=providers_constraints_reference,
providers_skip_constraints=providers_skip_constraints,
python=python,
quiet=quiet,
restart=restart,
run_db_tests_only=run_db_tests_only,
skip_db_tests=skip_db_tests,
skip_image_upgrade_check=skip_image_upgrade_check,
skip_environment_initialization=skip_environment_initialization,
standalone_dag_processor=standalone_dag_processor,
start_api_server_with_examples=start_api_server_with_examples,
test_type=test_type,
tty=tty,
upgrade_boto=upgrade_boto,
upgrade_sqlalchemy=upgrade_sqlalchemy,
use_airflow_version=use_airflow_version,
use_distributions_from_dist=use_distributions_from_dist,
use_uv=use_uv,
uv_http_timeout=uv_http_timeout,
verbose_commands=verbose_commands,
warn_image_upgrade_needed=warn_image_upgrade_needed,
)
perform_environment_checks()
rebuild_or_pull_ci_image_if_needed(command_params=shell_params)
result = enter_shell(shell_params=shell_params)
fix_ownership_using_docker()
sys.exit(result.returncode)
option_executor_start_airflow = click.option(
"--executor",
type=click.Choice(START_AIRFLOW_ALLOWED_EXECUTORS, case_sensitive=False),
help="Specify the executor to use with start-airflow (defaults to LocalExecutor "
"or CeleryExecutor depending on the integration used).",
)
@main.command(name="start-airflow")
@click.option(
"--skip-assets-compilation",
help="Skips compilation of assets when starting airflow even if the content of www changed "
"(mutually exclusive with --dev-mode).",
is_flag=True,
)
@click.option(
"--dev-mode",
help="Starts api-server in dev mode (assets are always recompiled in this case when starting) "
"(mutually exclusive with --skip-assets-compilation).",
is_flag=True,
)
@click.option(
"--create-all-roles",
help="Creates all user roles for testing with FabAuthManager (viewer, user, op, admin). "
"SimpleAuthManager always has all roles available.",
is_flag=True,
)
@click.argument("extra-args", nargs=-1, type=click.UNPROCESSED)
@option_airflow_constraints_location
@option_airflow_constraints_mode_ci
@option_airflow_constraints_reference
@option_airflow_extras
@option_auth_manager
@option_answer
@option_backend
@option_builder
@option_clean_airflow_installation
@option_celery_broker
@option_celery_flower
@option_db_reset
@option_debug_components
@option_debugger
@option_docker_host
@option_dry_run
@option_executor_start_airflow
@option_force_build
@option_forward_credentials
@option_github_repository
@option_installation_distribution_format
@option_install_selected_providers
@option_install_airflow_with_constraints_default_true
@option_all_integration
@option_load_default_connections
@option_load_example_dags
@option_mount_sources
@option_mysql_version
@option_platform_single
@option_postgres_version
@option_project_name
@option_providers_constraints_location
@option_providers_constraints_mode_ci
@option_providers_constraints_reference
@option_providers_skip_constraints
@option_python
@option_restart
@option_standalone_dag_processor
@option_use_mprocs
@option_use_uv
@option_uv_http_timeout
@option_use_airflow_version
@option_allow_pre_releases
@option_use_distributions_from_dist
@option_verbose
def start_airflow(
airflow_constraints_mode: str,
airflow_constraints_location: str,
airflow_constraints_reference: str,
airflow_extras: str,
install_airflow_with_constraints: bool,
allow_pre_releases: bool,
auth_manager: str,
backend: str,
builder: str,
celery_broker: str,
celery_flower: bool,
clean_airflow_installation: bool,
db_reset: bool,
debug_components: tuple[str, ...],
debugger: str,
dev_mode: bool,
create_all_roles: bool,
docker_host: str | None,
executor: str | None,
extra_args: tuple,
force_build: bool,
forward_credentials: bool,
github_repository: str,
integration: tuple[str, ...],
install_selected_providers: str,
load_default_connections: bool,
load_example_dags: bool,
mount_sources: str,
mysql_version: str,
distribution_format: str,
platform: str | None,
postgres_version: str,
project_name: str,
providers_constraints_location: str,
providers_constraints_mode: str,
providers_constraints_reference: str,
providers_skip_constraints: bool,
python: str,
restart: bool,
skip_assets_compilation: bool,
standalone_dag_processor: bool,
use_mprocs: bool,
use_airflow_version: str | None,
use_distributions_from_dist: bool,
use_uv: bool,
uv_http_timeout: int,
):
"""
Enter breeze environment and starts all Airflow components in the tmux or mprocs session.
Compile assets if contents of www directory changed.
"""
if dev_mode and skip_assets_compilation:
get_console().print(
"[warning]You cannot skip asset compilation in dev mode! Assets will be compiled!"
)
skip_assets_compilation = True
# Automatically enable file polling for hot reloading under WSL
if dev_mode and is_wsl():
os.environ["CHOKIDAR_USEPOLLING"] = "true"
get_console().print(
"[info]Detected WSL environment. Automatically enabled CHOKIDAR_USEPOLLING for hot reloading."
)
if use_airflow_version is None and not skip_assets_compilation:
# Now with the /ui project, lets only do a static build of /www and focus on the /ui
run_compile_ui_assets(dev=dev_mode, run_in_background=True, force_clean=False)
airflow_constraints_reference = _determine_constraint_branch_used(
airflow_constraints_reference, use_airflow_version
)
if not executor:
if CELERY_INTEGRATION in integration:
# Default to a celery executor if that's the integration being used
executor = ALLOWED_CELERY_EXECUTORS[0]
else:
# Otherwise default to LocalExecutor
executor = START_AIRFLOW_DEFAULT_ALLOWED_EXECUTOR
get_console().print(f"[info]Airflow will be using: {executor} to execute the tasks.")
platform = get_normalized_platform(platform)
shell_params = ShellParams(
airflow_constraints_location=airflow_constraints_location,
airflow_constraints_mode=airflow_constraints_mode,
airflow_constraints_reference=airflow_constraints_reference,
airflow_extras=airflow_extras,
allow_pre_releases=allow_pre_releases,
auth_manager=auth_manager,
backend=backend,
builder=builder,
celery_broker=celery_broker,
celery_flower=celery_flower,
clean_airflow_installation=clean_airflow_installation,
debug_components=debug_components,
debugger=debugger,
db_reset=db_reset,
dev_mode=dev_mode,
create_all_roles=create_all_roles,
docker_host=docker_host,
executor=executor,
extra_args=extra_args,
force_build=force_build,
forward_credentials=forward_credentials,
github_repository=github_repository,
integration=integration,
install_selected_providers=install_selected_providers,
install_airflow_with_constraints=install_airflow_with_constraints,
load_default_connections=load_default_connections,
load_example_dags=load_example_dags,
mount_sources=mount_sources,
mysql_version=mysql_version,
distribution_format=distribution_format,
platform=platform,
postgres_version=postgres_version,
project_name=project_name,
providers_constraints_location=providers_constraints_location,
providers_constraints_mode=providers_constraints_mode,
providers_constraints_reference=providers_constraints_reference,
providers_skip_constraints=providers_skip_constraints,
python=python,
restart=restart,
standalone_dag_processor=standalone_dag_processor,
start_airflow=True,
use_airflow_version=use_airflow_version,
use_distributions_from_dist=use_distributions_from_dist,
use_mprocs=use_mprocs,
use_uv=use_uv,
uv_http_timeout=uv_http_timeout,
)
rebuild_or_pull_ci_image_if_needed(command_params=shell_params)
result = enter_shell(shell_params=shell_params)
fix_ownership_using_docker()
if CELERY_INTEGRATION in integration and executor not in ALLOWED_CELERY_EXECUTORS:
get_console().print(
"[warning]A non-Celery executor was used with start-airflow in combination with the Celery "
"integration, this will lead to some processes failing to start (e.g. celery worker)\n"
)
sys.exit(result.returncode)
@main.command(name="build-docs")
@option_builder
@click.option(
"--clean-build",
is_flag=True,
help="Cleans the build directory before building the documentation and removes all inventory "
"cache (including external inventories).",
)
@click.option(
"--refresh-airflow-inventories",
is_flag=True,
help="When set, only airflow package inventories will be refreshed, regardless "
"if they are already downloaded. With `--clean-build` - everything is cleaned..",
)
@click.option("-d", "--docs-only", help="Only build documentation.", is_flag=True)
@click.option(
"--include-commits", help="Include commits in the documentation.", is_flag=True, envvar="INCLUDE_COMMITS"
)
@option_dry_run
@option_github_repository
@option_include_not_ready_providers
@option_include_removed_providers
@click.option(
"--one-pass-only",
help="Builds documentation in one pass only. This is useful for debugging sphinx errors.",
is_flag=True,
)
@click.option(
"--package-filter",
help="Filter(s) to use more than one can be specified. You can use glob pattern matching the "
"full package name, for example `apache-airflow-providers-*`. Useful when you want to select"
"several similarly named packages together.",
type=str,
multiple=True,
)
@click.option(
"--distributions-list",
envvar="DISTRIBUTIONS_LIST",
type=str,
help="Optional, contains space separated list of package ids that are processed for documentation "
"building, and document publishing. It is an easier alternative to adding individual packages as"
" arguments to every command. This overrides the packages passed as arguments.",
)
@click.option("-s", "--spellcheck-only", help="Only run spell checking.", is_flag=True)
@option_verbose
@option_answer
@argument_doc_packages
def build_docs(
builder: str,
clean_build: bool,
refresh_airflow_inventories: bool,
docs_only: bool,
github_repository: str,
include_not_ready_providers: bool,
include_removed_providers: bool,
include_commits: bool,
one_pass_only: bool,
package_filter: tuple[str, ...],
distributions_list: str,
spellcheck_only: bool,
doc_packages: tuple[str, ...],
):
"""
Build documents.
"""
perform_environment_checks()
fix_ownership_using_docker()
cleanup_python_generated_files()
build_params = BuildCiParams(
github_repository=github_repository,
python=DEFAULT_PYTHON_MAJOR_MINOR_VERSION,
builder=builder,
)
rebuild_or_pull_ci_image_if_needed(command_params=build_params)
if clean_build:
directories_to_clean = ["_build", "_doctrees", "_inventory_cache", "apis"]
else:
directories_to_clean = ["apis"]
generated_path = AIRFLOW_ROOT_PATH / "generated"
for dir_name in directories_to_clean:
get_console().print("Removing all generated dirs.")
for directory in generated_path.rglob(dir_name):
get_console().print(f"[info]Removing {directory}")
shutil.rmtree(directory, ignore_errors=True)
if refresh_airflow_inventories and not clean_build:
get_console().print("Removing airflow inventories.")
package_globs = ["helm-chart", "docker-stack", "apache-airflow*"]
for package_glob in package_globs:
for directory in (generated_path / "_inventory_cache").rglob(package_glob):
get_console().print(f"[info]Removing {directory}")
shutil.rmtree(directory, ignore_errors=True)
docs_list_as_tuple: tuple[str, ...] = ()
if distributions_list and len(distributions_list):
get_console().print(
f"\n[info]Populating provider list from DISTRIBUTIONS_LIST env as {distributions_list}"
)
# Override doc_packages with values from DISTRIBUTIONS_LIST
docs_list_as_tuple = tuple(distributions_list.split(" "))
if doc_packages and docs_list_as_tuple:
get_console().print(
f"[warning]Both package arguments and --distributions-list / DISTRIBUTIONS_LIST passed. "
f"Overriding to {docs_list_as_tuple}"
)
doc_packages = docs_list_as_tuple or doc_packages
doc_builder = DocBuildParams(
package_filter=package_filter,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
one_pass_only=one_pass_only,
include_commits=include_commits,
short_doc_packages=expand_all_provider_distributions(
short_doc_packages=doc_packages,
include_removed=include_removed_providers,
include_not_ready=include_not_ready_providers,
),
)
cmd = "/opt/airflow/scripts/in_container/run_docs_build.sh " + " ".join(
[shlex.quote(arg) for arg in doc_builder.args_doc_builder]
)
shell_params = ShellParams(
github_repository=github_repository,
python=DEFAULT_PYTHON_MAJOR_MINOR_VERSION,
mount_sources=MOUNT_ALL,
)
result = execute_command_in_shell(shell_params, project_name="docs", command=cmd)
fix_ownership_using_docker()
if result.returncode == 0:
get_console().print(
"Run ./docs/start_doc_server.sh for a lighter resource option and view "
"the built docs at http://localhost:8000"
)
sys.exit(result.returncode)
@main.command(name="down", help="Stop running breeze environment.")
@click.option(
"-p",
"--preserve-volumes",
help="Skip removing database volumes when stopping Breeze.",
is_flag=True,
)
@click.option(
"-c",
"--cleanup-mypy-cache",
help="Additionally cleanup MyPy cache.",
is_flag=True,
)
@click.option(
"-b",
"--cleanup-build-cache",
help="Additionally cleanup Build (pip/uv) cache.",
is_flag=True,
)
@option_verbose
@option_dry_run
def down(preserve_volumes: bool, cleanup_mypy_cache: bool, cleanup_build_cache: bool):
perform_environment_checks()
shell_params = ShellParams(backend="all", include_mypy_volume=cleanup_mypy_cache)
bring_compose_project_down(preserve_volumes=preserve_volumes, shell_params=shell_params)
if cleanup_mypy_cache:
command_to_execute = ["docker", "volume", "rm", "--force", "mypy-cache-volume"]
run_command(command_to_execute)
if cleanup_build_cache:
command_to_execute = ["docker", "volume", "rm", "--force", "airflow-cache-volume"]
run_command(command_to_execute)
@main.command(name="exec", help="Joins the interactive shell of running airflow container.")
@option_verbose
@option_dry_run
@click.argument("exec_args", nargs=-1, type=click.UNPROCESSED)
def exec(exec_args: tuple):
perform_environment_checks()
container_running = find_airflow_container()
if container_running:
cmd_to_run = [
"docker",
"exec",
"-it",
container_running,
"/opt/airflow/scripts/docker/entrypoint_exec.sh",
]
if exec_args:
cmd_to_run.extend(exec_args)
process = run_command(
cmd_to_run,
check=False,
no_output_dump_on_exception=False,
text=True,
)
if not process:
sys.exit(1)
sys.exit(process.returncode)
else:
get_console().print("[error]No airflow containers are running[/]")
sys.exit(1)
def stop_exec_on_error(returncode: int):
get_console().print("\n[error]ERROR in finding the airflow docker-compose process id[/]\n")
sys.exit(returncode)
def find_airflow_container() -> str | None:
shell_params = ShellParams()
check_docker_resources(shell_params.airflow_image_name)
shell_params.print_badge_info()
cmd = [
"docker",
"compose",
"--project-name",
shell_params.project_name,
"ps",
"--all",
"--filter",
"status=running",
"airflow",
]
docker_compose_ps_command = run_command(
cmd, text=True, capture_output=True, check=False, env=shell_params.env_variables_for_docker_commands
)
if get_dry_run():
return "CONTAINER_ID"
if docker_compose_ps_command.returncode != 0:
if get_verbose():
get_console().print(docker_compose_ps_command.stdout)
get_console().print(docker_compose_ps_command.stderr)
stop_exec_on_error(docker_compose_ps_command.returncode)
return None
output = docker_compose_ps_command.stdout
container_info = output.strip().splitlines()
if container_info:
container_running = container_info[-1].split(" ")[0]
if container_running.startswith("-"):
# On docker-compose v1 we get '--------' as output here
stop_exec_on_error(docker_compose_ps_command.returncode)
return container_running
stop_exec_on_error(1)
return None
@main.command(
name="generate-migration-file", help="Autogenerate the alembic migration file for the ORM changes."
)
@option_builder
@option_github_repository
@click.option(
"-m",
"--message",
help="Message to use for the migration",
default="Empty message",
show_default=True,
)
def autogenerate(
builder: str,
github_repository: str,
message: str,
):
"""Autogenerate the alembic migration file."""
perform_environment_checks()
fix_ownership_using_docker()
build_params = BuildCiParams(
github_repository=github_repository, python=DEFAULT_PYTHON_MAJOR_MINOR_VERSION, builder=builder
)
rebuild_or_pull_ci_image_if_needed(command_params=build_params)
shell_params = ShellParams(
github_repository=github_repository,
python=DEFAULT_PYTHON_MAJOR_MINOR_VERSION,
)
cmd = f"/opt/airflow/scripts/in_container/run_generate_migration.sh '{message}'"
execute_command_in_shell(shell_params, project_name="db", command=cmd)
fix_ownership_using_docker()
@main.command(name="doctor", help="Auto-healing of breeze")
@option_answer
@option_verbose
@option_dry_run
@click.pass_context
def doctor(ctx):
shell_params = ShellParams()
check_docker_resources(shell_params.airflow_image_name)
shell_params.print_badge_info()
perform_environment_checks()
fix_ownership_using_docker(quiet=False)
given_answer = user_confirm("Are you sure with the removal of temporary Python files and Python cache?")
if not get_dry_run() and given_answer == Answer.YES:
cleanup_python_generated_files()
shell_params = ShellParams(backend="all", include_mypy_volume=True)
bring_compose_project_down(preserve_volumes=False, shell_params=shell_params)
given_answer = user_confirm("Are you sure with the removal of mypy cache and build cache dir?")
if given_answer == Answer.YES:
get_console().print("\n[info]Cleaning mypy cache...\n")
command_to_execute = ["docker", "volume", "rm", "--force", "mypy-cache-volume"]
run_command(command_to_execute)
get_console().print("\n[info]Cleaning build cache...\n")
command_to_execute = ["docker", "volume", "rm", "--force", "airflow-cache-volume"]
run_command(command_to_execute)
get_console().print("\n[info]Deleting .build cache dir...\n")
dirpath = Path(".build")
if not get_dry_run() and dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
given_answer = user_confirm(
"Proceed with breeze cleanup to remove all docker volumes, images and networks?"
)
if given_answer == Answer.YES:
get_console().print("\n[info]Executing breeze cleanup...\n")
ctx.forward(cleanup)
elif given_answer == Answer.QUIT:
sys.exit(0)
@main.command(
name="run",
help="Run a command in the Breeze environment without entering the interactive shell.",
context_settings={"ignore_unknown_options": True, "allow_extra_args": True},
)
@click.argument("command", required=True)
@click.argument("command_args", nargs=-1, type=click.UNPROCESSED)
@option_answer
@option_backend
@option_builder
@option_docker_host
@option_dry_run
@option_force_build
@option_forward_credentials
@option_github_repository
@option_mysql_version
@option_platform_single
@option_postgres_version
@option_project_name
@option_python
@option_skip_image_upgrade_check
@option_tty
@option_use_uv
@option_uv_http_timeout
@option_verbose
def run(
command: str,
command_args: tuple,
backend: str,
builder: str,
docker_host: str | None,
force_build: bool,
forward_credentials: bool,
github_repository: str,
mysql_version: str,
platform: str | None,
postgres_version: str,
project_name: str,
python: str,
skip_image_upgrade_check: bool,
tty: str,
use_uv: bool,
uv_http_timeout: int,
):
"""
Run a command in the Breeze environment without entering the interactive shell.
This is useful for automated testing, CI workflows, and one-off command execution.
The command will be executed in a fresh container that is automatically cleaned up.
Each run uses a unique project name to avoid conflicts with other instances.
Examples:
# Run a specific test
breeze run pytest providers/google/tests/unit/google/cloud/operators/test_dataflow.py -v
# Check version compatibility
breeze run python -c "from airflow.providers.google.version_compat import AIRFLOW_V_3_0_PLUS; print(AIRFLOW_V_3_0_PLUS)"
# Run bash commands
breeze run bash -c "cd /opt/airflow && python -m pytest providers/google/tests/"
# Run with different Python version
breeze run --python 3.11 pytest providers/standard/tests/unit/operators/test_bash.py
# Run with PostgreSQL backend
breeze run --backend postgres pytest providers/postgres/tests/
"""
import uuid
from airflow_breeze.commands.ci_image_commands import rebuild_or_pull_ci_image_if_needed
from airflow_breeze.params.shell_params import ShellParams
from airflow_breeze.utils.ci_group import ci_group
from airflow_breeze.utils.docker_command_utils import execute_command_in_shell
from airflow_breeze.utils.platforms import get_normalized_platform
# Generate a unique project name to avoid conflicts with other running instances
unique_project_name = f"{project_name}-run-{uuid.uuid4().hex[:8]}"
# Build the full command string with proper escaping
import shlex
if command_args:
# Use shlex.join to properly escape arguments
full_command = f"{command} {shlex.join(command_args)}"
else:
full_command = command
platform = get_normalized_platform(platform)
# Create shell parameters optimized for non-interactive command execution
shell_params = ShellParams(
backend=backend,
builder=builder,
docker_host=docker_host,
force_build=force_build,
forward_credentials=forward_credentials,
github_repository=github_repository,
mysql_version=mysql_version,
platform=platform,
postgres_version=postgres_version,
project_name=unique_project_name,
python=python,
skip_image_upgrade_check=skip_image_upgrade_check,
use_uv=use_uv,
uv_http_timeout=uv_http_timeout,
# Optimizations for non-interactive execution
quiet=True,
skip_environment_initialization=True,
tty=tty,
# Set extra_args to empty tuple since we'll pass the command directly
extra_args=(),
)
if get_verbose():
get_console().print(f"[info]Running command in Breeze: {full_command}[/]")
get_console().print(f"[info]Using project name: {unique_project_name}[/]")
# Build or pull the CI image if needed
rebuild_or_pull_ci_image_if_needed(command_params=shell_params)
# Execute the command in the shell
with ci_group(f"Running command: {command}"):
result = execute_command_in_shell(
shell_params=shell_params,
project_name=unique_project_name,
command=full_command,
# Always preserve the backend specified by user (or resolved from default)
preserve_backend=True,
)
# Clean up ownership
from airflow_breeze.utils.docker_command_utils import fix_ownership_using_docker
fix_ownership_using_docker()
# Exit with the same code as the command
sys.exit(result.returncode)
| TimerThread |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 18510,
"end": 18989
} | class ____(FormLayout):
"""Customizable form layout."""
def __init__(self, *elements, **kwargs):
self.children = _convert_to_children(elements)
super().__init__(**kwargs)
def append_visible_fields(self, form: forms.Form, root: ElementTree.Element):
wrapper = ElementTree.SubElement(
root, "div", {"class": "vf-form__visiblefields mdc-layout-grid__inner"}
)
Column(*self.children).append(self, form, wrapper)
| Layout |
python | huggingface__transformers | src/transformers/models/smolvlm/processing_smolvlm.py | {
"start": 4218,
"end": 19972
} | class ____(ProcessorMixin):
r"""
Constructs a SmolVLM processor which wraps a LLama tokenizer and SmolVLM image processor into a single processor.
[`SmolVLMProcessor`] offers all the functionalities of [`SmolVLMImageProcessor`] and [`SmolVLMTokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`SmolVLMImageProcessor`):
An instance of [`SmolVLMImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
video_processor (`SmolVLMImageProcessor`):
n instance of [`SmolVLMImageProcessor`]. The video processor is a required input.
image_seq_len (`int`, *optional*, defaults to 169):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(
self,
image_processor,
tokenizer,
video_processor,
image_seq_len: int = 169,
chat_template: Optional[str] = None,
**kwargs,
):
self.fake_image_token = getattr(tokenizer, "fake_image_token", "<fake_token_around_image>")
self.image_token = getattr(tokenizer, "image_token", "<image>")
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.end_of_utterance_token = getattr(tokenizer, "end_of_utterance_token", "<end_of_utterance>")
self.global_image_token = getattr(tokenizer, "global_image_token", "<global-img>")
self.image_seq_len = image_seq_len
self.video_token = getattr(tokenizer, "video_token", "<video>")
if not num2words:
raise ImportError(
"Package `num2words` is required to run SmolVLM processor. Install it with `pip install num2words`."
)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template, **kwargs)
def expand_text_with_image_tokens(self, text, image_rows, image_cols):
prompt_strings = []
for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):
# Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
image_prompt_strings = []
for n_rows, n_cols in zip(sample_rows, sample_cols):
image_prompt_string = get_image_prompt_string(
n_rows,
n_cols,
self.image_seq_len,
image_token=self.image_token,
fake_token_around_image=self.fake_image_token,
global_image_token=self.global_image_token,
)
image_prompt_strings.append(image_prompt_string)
split_sample = sample.split(self.image_token)
if len(split_sample) == 0:
raise ValueError("The image token should be present in the text.")
# Place in the image prompt strings where the image tokens are
sample = split_sample[0]
for i, image_prompt_string in enumerate(image_prompt_strings):
sample += image_prompt_string + split_sample[i + 1]
prompt_strings.append(sample)
return prompt_strings
def expand_text_with_video_tokens(self, text, video_inputs):
num_frames = video_inputs["pixel_values"].shape[1]
video_metadata = iter(video_inputs["video_metadata"])
prompt_strings = []
for sample in text:
while self.video_token in sample:
metadata = next(video_metadata)
if metadata.fps is None:
logger.warning_once(
"SmolVLM requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 # Set the default fps to 24 for BC, otherwise `timestamps` can't be inferred
timestamps = [(int(second // 60), int(second % 60)) for second in metadata.timestamps]
duration = int(metadata.duration) if metadata.duration is not None else int(metadata.timestamps[-1])
duration_td = timedelta(seconds=int(duration))
image_prompt_strings = DEFAULT_VIDEO_INTRO.format(
frame_count=num2words(num_frames), video_duration=str(duration_td)
)
for timestamp in timestamps:
image_prompt_string = _prompt_single_image(
self.image_seq_len,
image_token=self.image_token,
fake_token_around_image=self.fake_image_token,
global_image_token=self.global_image_token,
)
timestamp = f"{timestamp[0]:02d}:{timestamp[1]:02d}"
image_prompt_string = FRAME_TIMESTAMP_MESSAGE.format(timestamp=timestamp) + image_prompt_string
image_prompt_strings += image_prompt_string
image_prompt_strings += DEFAULT_MEDIA_OUTTRO
sample = sample.replace(self.video_token, image_prompt_strings, 1)
prompt_strings.append(sample)
return prompt_strings
def __call__(
self,
images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]] = None,
text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[SmolVLMProcessorKwargs],
) -> BatchEncoding:
"""
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import SmolVLMProcessor
>>> from transformers.image_utils import load_image
>>> processor = SmolVLMProcessor.from_pretrained("HuggingFaceM4/SmolVLM2-256M-Video-Instruct")
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Wherever an image token, `<image>` is encountered it is expanded to
`<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` * <fake_token_around_image>`.
videos (`list[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The video or batch of videos to be prepared. Each video can be a list of PIL frames, NumPy array or PyTorch
tensor. If is of type `list[VideoInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None and videos is None:
raise ValueError("You must provide one of `text`, `images` or `videos'.")
if text is None and ((images is None) ^ (videos is not None)):
raise ValueError("You must specify exactly one of `images` or `videos`")
output_kwargs = self._merge_kwargs(
SmolVLMProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
n_images_in_text = sum(sample.count(self.image_token) for sample in text)
if n_images_in_text > 0 and (images is None and videos is None):
raise ValueError(f"We detected {n_images_in_text} tokens in the text but no images/videos were passed")
inputs = {}
# Images and videos are mutually exclusive, so process one which is present
if images is not None:
images = self.image_processor.fetch_images(images)
images = make_nested_list_of_images(images)
vision_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
image_rows = vision_inputs.pop("rows", None)
image_cols = vision_inputs.pop("cols", None)
inputs.update(vision_inputs)
if text is not None:
n_images_in_text = [sample.count(self.image_token) for sample in text]
n_images_in_images = [len(sublist) for sublist in images]
if n_images_in_images != n_images_in_text:
raise ValueError(
f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
)
# Set default values for image_rows and image_cols if not provided
if image_rows is None:
image_rows = [[0] * n_images for n_images in n_images_in_text]
if image_cols is None:
image_cols = [[0] * n_images for n_images in n_images_in_text]
text = self.expand_text_with_image_tokens(text, image_rows=image_rows, image_cols=image_cols)
elif videos is not None:
vision_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
if text is not None:
n_videos_in_text = [sample.count(self.video_token) for sample in text]
n_videos_in_videos = [len(sublist) for sublist in videos]
if n_videos_in_videos != n_videos_in_text:
raise ValueError(
f"The number of videos in the text {n_videos_in_text} and videos {n_videos_in_videos} should be the same."
)
text = self.expand_text_with_video_tokens(text, vision_inputs)
# If user has not requested video metadata, pop it. By default metadata
# is always returned to expand video tokens correctly
if not kwargs.get("return_metadata"):
vision_inputs.pop("video_metadata")
inputs.update(vision_inputs)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
if text is not None:
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
inputs.update(text_inputs)
return BatchFeature(inputs, tensor_type=return_tensors)
def apply_chat_template(
self,
conversation: Union[list[dict[str, str]], list[list[dict[str, str]]]],
chat_template: Optional[str] = None,
**kwargs: Unpack[AllKwargsForChatTemplate],
) -> str:
"""
Similar to the `apply_chat_template` method on tokenizers, this method applies a Jinja template to input
conversations to turn them into a single tokenizable string.
The input is expected to be in the following format, where each message content is a list consisting of text and
optionally image or video inputs. One can also provide an image, video, URL or local path which will be used to form
`pixel_values` when `return_dict=True`. If not provided, one will get only the formatted text, optionally tokenized text.
conversation = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "Please describe this image in detail."},
],
},
]
Args:
conversation (`Union[list[Dict, [str, str]], list[list[dict[str, str]]]]`):
The conversation to format.
chat_template (`Optional[str]`, *optional*):
The Jinja template to use for formatting the conversation. If not provided, the tokenizer's
chat template is used.
"""
if isinstance(conversation, (list, tuple)) and (
isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "content")
):
conversations = conversation
else:
conversations = [conversation]
has_video = any(
(isinstance(content, dict) and content["type"] == "video")
for conversation in conversations
for message in conversation
for content in message["content"]
)
if chat_template is None and has_video:
# re-assign to the correct default template for BC, if user is not requesting their own template
chat_template = DEFAULT_CHAT_TEMPLATE
kwargs.setdefault("num_frames", self.video_processor.num_frames)
kwargs.setdefault("fps", self.video_processor.fps)
return super().apply_chat_template(conversation, chat_template, **kwargs)
__all__ = ["SmolVLMProcessor"]
| SmolVLMProcessor |
python | viewflow__viewflow | viewflow/workflow/chart.py | {
"start": 1040,
"end": 1863
} | class ____(object):
__slots__ = [
"col",
"row",
"x",
"y",
"width",
"height",
"node",
"shape",
"title",
"status",
]
def __init__(
self,
node,
col=-1,
row=-1,
x=-1,
y=-1,
width=-1,
height=-1,
shape=None,
title=None,
status=None,
):
self.node = node
self.col = col
self.row = row
self.x = x
self.y = y
self.width = width
self.height = height
self.shape = shape if shape is not None else Shape()
self.title = title
self.status = status
def incoming(self):
return self.node._incoming()
def outgoing(self):
return self.node._outgoing()
| Cell |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 8889,
"end": 10999
} | class ____:
def test_basic(self, xp):
xp_assert_close(windows.boxcar(6, xp=xp),
xp.asarray([1.0, 1, 1, 1, 1, 1], dtype=xp.float64))
xp_assert_close(windows.boxcar(7, xp=xp),
xp.asarray([1.0, 1, 1, 1, 1, 1, 1], dtype=xp.float64))
xp_assert_close(windows.boxcar(6, False, xp=xp),
xp.asarray([1.0, 1, 1, 1, 1, 1], dtype=xp.float64))
cheb_odd_true = [0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938]
cheb_even_true = [0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894]
@make_xp_test_case(windows.chebwin)
| TestBoxcar |
python | getsentry__sentry | src/sentry/tempest/endpoints/tempest_credentials.py | {
"start": 891,
"end": 2613
} | class ____(ProjectEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.GDX
permission_classes = (TempestCredentialsPermission,)
def get(self, request: Request, project: Project) -> Response:
if not has_tempest_access(project.organization):
raise NotFound
tempest_credentials_qs = TempestCredentials.objects.filter(project=project)
return self.paginate(
request=request,
queryset=tempest_credentials_qs,
on_results=lambda x: serialize(x, request.user, TempestCredentialsSerializer()),
paginator_cls=OffsetPaginator,
)
def post(self, request: Request, project: Project) -> Response:
if not has_tempest_access(project.organization):
raise NotFound
serializer = DRFTempestCredentialsSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
credentials = serializer.save(created_by_id=request.user.id, project=project)
# Make initial call to determine the latest item ID
fetch_latest_item_id.delay(credentials.id)
except IntegrityError:
return Response(
{"detail": "A credential with this client ID already exists."}, status=400
)
self.create_audit_entry(
request,
organization=project.organization,
target_object=credentials.id,
event=audit_log.get_event_id("TEMPEST_CLIENT_ID_ADD"),
data=credentials.get_audit_log_data(),
)
return Response(serializer.data, status=201)
| TempestCredentialsEndpoint |
python | astropy__astropy | astropy/io/misc/ecsv.py | {
"start": 10337,
"end": 11305
} | class ____(ECSVEngine):
"""ECSV reader engine using PyArrow."""
name = "pyarrow"
format = "pyarrow.csv"
def convert_np_type(self, np_type: str) -> str:
# PyArrow does not support float128 and there is no workaround (unlike float16).
if np_type == "float128":
raise TypeError(
"pyarrow engine does not support float128, choose a different engine"
)
# PyArrow does not support float16, so we need to convert it to float32.
# The final output is still cast as float16.
return "float32" if np_type == "float16" else np_type
def get_data_kwargs(
self,
header: ECSVHeader,
null_values: list[str],
) -> dict[str, Any]:
# See base method for details.
kw = {}
kw["null_values"] = null_values
kw["header_start"] = header.n_header
kw["dtypes"] = self.get_converters(header)
return kw
| ECSVEnginePyArrow |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py | {
"start": 11593,
"end": 15696
} | class ____(test.TestCase):
"""Test that the operator's shape is the broadcast of arguments."""
def test_static_shape_broadcasts_up_from_operator_to_other_args(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3)
u = array_ops.ones(shape=[2, 3, 2])
diag = array_ops.ones(shape=[2, 2])
operator = linalg.LinearOperatorLowRankUpdate(base_operator, u, diag)
# domain_dimension is 3
self.assertAllEqual([2, 3, 3], operator.shape)
self.assertAllEqual([2, 3, 3], self.evaluate(operator.to_dense()).shape)
@test_util.run_deprecated_v1
def test_dynamic_shape_broadcasts_up_from_operator_to_other_args(self):
num_rows_ph = array_ops.placeholder(dtypes.int32)
base_operator = linalg.LinearOperatorIdentity(num_rows=num_rows_ph)
u_shape_ph = array_ops.placeholder(dtypes.int32)
u = array_ops.ones(shape=u_shape_ph)
v_shape_ph = array_ops.placeholder(dtypes.int32)
v = array_ops.ones(shape=v_shape_ph)
diag_shape_ph = array_ops.placeholder(dtypes.int32)
diag_update = array_ops.ones(shape=diag_shape_ph)
operator = linalg.LinearOperatorLowRankUpdate(base_operator,
u=u,
diag_update=diag_update,
v=v)
feed_dict = {
num_rows_ph: 3,
u_shape_ph: [1, 1, 2, 3, 2], # batch_shape = [1, 1, 2]
v_shape_ph: [1, 2, 1, 3, 2], # batch_shape = [1, 2, 1]
diag_shape_ph: [2, 1, 1, 2] # batch_shape = [2, 1, 1]
}
with self.cached_session():
shape_tensor = operator.shape_tensor().eval(feed_dict=feed_dict)
self.assertAllEqual([2, 2, 2, 3, 3], shape_tensor)
dense = operator.to_dense().eval(feed_dict=feed_dict)
self.assertAllEqual([2, 2, 2, 3, 3], dense.shape)
def test_u_and_v_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
v = rng.rand(4, 3, 2)
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v)
def test_u_and_base_operator_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(
num_rows=3, batch_shape=[4], dtype=np.float64)
u = rng.rand(5, 3, 2)
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_base_operator_incompatible_domain_dimension(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 4, 2)
with self.assertRaisesRegex(ValueError, "not compatible"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_diag_incompatible_low_rank_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(5, 4) # Last dimension should be 2
with self.assertRaisesRegex(ValueError, "not compatible"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
def test_diag_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(4, 2) # First dimension should be 5
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestNoDiagUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky)
linear_operator_test_util.add_tests(
LinearOperatorLowRankUpdatetestWithDiagNotSquare)
test.main()
| LinearOperatorLowRankUpdateBroadcastsShape |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_hash.py | {
"start": 831,
"end": 904
} | class ____:
def __hash__(self):
raise NotImplementedError
| Hash5 |
python | getsentry__sentry | tests/sentry/sentry_apps/models/test_sentryappinstallation.py | {
"start": 459,
"end": 2292
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.proxy = self.create_user()
self.org = self.create_organization()
self.application = ApiApplication.objects.create(owner=self.proxy)
self.sentry_app = SentryApp.objects.create(
application=self.application,
name="NullDB",
proxy_user=self.proxy,
owner_id=self.org.id,
scope_list=("project:read",),
webhook_url="http://example.com",
)
self.install = SentryAppInstallation(
sentry_app=self.sentry_app, organization_id=self.org.id
)
def test_paranoid(self) -> None:
self.install.save()
self.install.delete()
assert self.install.date_deleted is not None
assert self.install not in SentryAppInstallation.objects.all()
def test_date_updated(self) -> None:
self.install.save()
date_updated = self.install.date_updated
self.install.save()
assert not self.install.date_updated == date_updated
def test_related_names(self) -> None:
self.install.save()
assert self.install in self.install.sentry_app.installations.all()
assert self.install in SentryAppInstallation.objects.filter(
organization_id=self.install.organization_id
)
def test_handle_async_replication_clears_region_cache(self) -> None:
with mock.patch.object(caching_module, "region_caching_service") as mock_caching_service:
self.install.save()
region = get_region_for_organization(self.org.slug)
mock_caching_service.clear_key.assert_any_call(
key=f"app_service.get_installation:{self.install.id}", region_name=region.name
)
| SentryAppInstallationTest |
python | huggingface__transformers | src/transformers/models/dots1/modular_dots1.py | {
"start": 1216,
"end": 1261
} | class ____(Qwen3RMSNorm):
pass
| Dots1RMSNorm |
python | pytorch__pytorch | test/jit/test_builtins.py | {
"start": 462,
"end": 9984
} | class ____(JitTestCase):
"""
Tests for TorchScript support of Python builtin functions.
"""
def test_has_attr(self):
class HasA(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 0
class HasB(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = 1
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods = torch.nn.ModuleList([HasA(), HasB()])
def forward(self):
# use a list to encode hasattr results
l = torch.jit.annotate(List[int], [])
for mod in self.mods:
l.append(int(hasattr(mod, "a")))
l.append(int(hasattr(mod, "b")))
# actually retrieve the attr to test static refinement
if hasattr(mod, "a"):
l.append(mod.a)
if hasattr(mod, "b"):
l.append(mod.b)
return l
self.checkModule(Mod(), ())
def test_has_attr_invalid_args(self):
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = torch.nn.Linear(1, 1)
def forward(self, name):
# not allowed, `name` must be static.
return hasattr(self.mod, name)
with self.assertRaisesRegexWithHighlight(RuntimeError, "hasattr", "name"):
torch.jit.script(Mod())
class Mod(torch.nn.Module):
def forward(self, name):
# not allowed, `torch.rand` is not a class type
return hasattr(torch.rand(2, 3), name)
with self.assertRaisesRegexWithHighlight(RuntimeError, "hasattr", "name"):
torch.jit.script(Mod())
def test_del(self):
def fn(x: List[int]) -> List[int]:
a = x * 2
del a
return x
self.checkScript(fn, ([1, 2, 3],))
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
@torch.jit.script
def fn(x):
a = x**2
del a
return a # noqa: F821
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
@torch.jit.script
def fn(x):
a = x**2
if a:
del a
return a
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "b"):
@torch.jit.script
def fn(x):
a = x**2
del b # noqa: F821
return a
def test_del_multiple_operands(self):
def fn(x: List[int]) -> List[int]:
a, b, c = x[0], x[1], x[2]
del a, b, c
return x
self.checkScript(fn, ([1, 2, 3],))
def del_list_multiple_operands(x: List[int]) -> List[int]:
del x[0], x[1]
return x
py_out = del_list_multiple_operands([0, 1, 2])
jit_out = torch.jit.script(del_list_multiple_operands)([0, 1, 2])
self.assertEqual(py_out, jit_out)
def del_dict_multiple_operands(x: Dict[str, int]) -> Dict[str, int]:
del x["hi"], x["there"]
return x
py_out = del_dict_multiple_operands({"hi": 5, "there": 6})
jit_out = torch.jit.script(del_dict_multiple_operands)({"hi": 5, "there": 6})
self.assertEqual(py_out, jit_out)
def test_torch_check(self):
"""Test torch._check functionality with flexible argument handling"""
def test_check_basic(x):
torch._check(x.sum().item() > -1000)
return x
def test_check_with_message(x):
torch._check(x.sum().item() > -1000, "Tensor sum must be reasonable")
return x
def test_check_with_kwarg_message(x):
torch._check(
x.sum().item() > -1000, message="Tensor sum must be reasonable"
)
return x
def test_check_cond_kwarg(x):
torch._check(cond=x.sum().item() > -1000)
return x
def test_check_both_kwargs(x):
torch._check(cond=x.sum().item() > -1000, message="Both as kwargs")
return x
def test_check_kwargs_reversed(x):
torch._check(message="Reversed order", cond=x.sum().item() > -1000)
return x
def test_check_in_loop(x):
sizes = torch.jit.annotate(List[int], x.tolist())
for s in sizes:
torch._check(s > -100)
return x
test_tensor = torch.tensor([1, 2, 3])
# Test all variations
self.checkScript(test_check_basic, (test_tensor,))
self.checkScript(test_check_with_message, (test_tensor,))
self.checkScript(test_check_with_kwarg_message, (test_tensor,))
self.checkScript(test_check_cond_kwarg, (test_tensor,))
self.checkScript(test_check_both_kwargs, (test_tensor,))
self.checkScript(test_check_kwargs_reversed, (test_tensor,))
self.checkScript(test_check_in_loop, (test_tensor,))
# Test that the compiled functions work correctly
scripted_basic = torch.jit.script(test_check_basic)
scripted_with_message = torch.jit.script(test_check_with_message)
scripted_with_kwarg = torch.jit.script(test_check_with_kwarg_message)
scripted_cond_kwarg = torch.jit.script(test_check_cond_kwarg)
scripted_both_kwargs = torch.jit.script(test_check_both_kwargs)
scripted_kwargs_reversed = torch.jit.script(test_check_kwargs_reversed)
scripted_in_loop = torch.jit.script(test_check_in_loop)
# These should all succeed without throwing
result1 = scripted_basic(test_tensor)
result2 = scripted_with_message(test_tensor)
result3 = scripted_with_kwarg(test_tensor)
result4 = scripted_cond_kwarg(test_tensor)
result5 = scripted_both_kwargs(test_tensor)
result6 = scripted_kwargs_reversed(test_tensor)
result7 = scripted_in_loop(test_tensor)
# Results should be the same as input
for result in [result1, result2, result3, result4, result5, result6, result7]:
self.assertEqual(result, test_tensor)
# Check that the message constants are present in the graphs
FileCheck().check("Tensor sum must be reasonable").run(
scripted_with_message.graph
)
FileCheck().check("Tensor sum must be reasonable").run(
scripted_with_kwarg.graph
)
FileCheck().check("Both as kwargs").run(scripted_both_kwargs.graph)
FileCheck().check("Reversed order").run(scripted_kwargs_reversed.graph)
# Verify the graphs contain some computation (not just empty)
basic_graph_str = str(scripted_basic.graph)
self.assertTrue(
len(basic_graph_str) > 100, "Basic graph should contain some computation"
)
# Verify the loop case contains a loop
FileCheck().check("prim::Loop").run(scripted_in_loop.graph)
for scripted_func in [
scripted_basic,
scripted_with_message,
scripted_with_kwarg,
scripted_cond_kwarg,
scripted_both_kwargs,
scripted_kwargs_reversed,
]:
FileCheck().check("prim::If").check("prim::RaiseException").run(
scripted_func.graph
)
def test_torch_check_invalid_args(self):
"""Test torch._check with invalid arguments"""
# Test too many arguments
with self.assertRaisesRegex(
RuntimeError, "torch._check\\(\\) expects 1 or 2 arguments"
):
@torch.jit.script
def too_many_args(x):
torch._check(True, "msg", "extra")
return x
# Test invalid keyword argument
with self.assertRaisesRegex(RuntimeError, "unexpected keyword argument"):
@torch.jit.script
def invalid_kwarg(x):
torch._check(True, invalid_arg="msg")
return x
# Test duplicate cond argument (positional + keyword)
with self.assertRaisesRegex(
RuntimeError, "multiple values for argument 'cond'"
):
@torch.jit.script
def duplicate_cond(x):
torch._check(True, cond=False)
return x
# Test missing required cond argument
with self.assertRaisesRegex(RuntimeError, "missing required argument 'cond'"):
@torch.jit.script
def missing_cond(x):
torch._check(message="msg only")
return x
# Test no arguments at all
with self.assertRaisesRegex(
RuntimeError, "torch._check\\(\\) expects 1 or 2 arguments"
):
@torch.jit.script
def no_args(x):
torch._check()
return x
# Test too many total arguments (positional + keyword)
with self.assertRaisesRegex(
RuntimeError, "torch._check\\(\\) expects 1 or 2 arguments"
):
@torch.jit.script
def too_many_total_args(x):
torch._check(True, "msg", cond=False)
return x
| TestBuiltins |
python | redis__redis-py | tests/test_asyncio/test_search.py | {
"start": 83662,
"end": 126558
} | class ____(AsyncSearchTestsBase):
async def _create_hybrid_search_index(self, decoded_r: redis.Redis, dim=4):
await decoded_r.ft().create_index(
(
TextField("description"),
NumericField("price"),
TagField("color"),
TagField("item_type"),
NumericField("size"),
VectorField(
"embedding",
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": "L2",
},
),
VectorField(
"embedding-hnsw",
"HNSW",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": "L2",
},
),
),
definition=IndexDefinition(prefix=["item:"]),
)
await AsyncSearchTestsBase.waitForIndex(decoded_r, "idx")
@staticmethod
def _generate_random_vector(dim):
return [random.random() for _ in range(dim)]
@staticmethod
def _generate_random_str_data(dim):
chars = "abcdefgh12345678"
return "".join(random.choice(chars) for _ in range(dim))
@staticmethod
async def _add_data_for_hybrid_search(
client: redis.Redis,
items_sets=1,
randomize_data=False,
dim_for_random_data=4,
use_random_str_data=False,
):
if randomize_data or use_random_str_data:
generate_data_func = (
TestHybridSearch._generate_random_str_data
if use_random_str_data
else TestHybridSearch._generate_random_vector
)
dim_for_random_data = (
dim_for_random_data * 4 if use_random_str_data else dim_for_random_data
)
items = [
(generate_data_func(dim_for_random_data), "red shoes"),
(generate_data_func(dim_for_random_data), "green shoes with red laces"),
(generate_data_func(dim_for_random_data), "red dress"),
(generate_data_func(dim_for_random_data), "orange dress"),
(generate_data_func(dim_for_random_data), "black shoes"),
]
else:
items = [
([1.0, 2.0, 7.0, 8.0], "red shoes"),
([1.0, 4.0, 7.0, 8.0], "green shoes with red laces"),
([1.0, 2.0, 6.0, 5.0], "red dress"),
([2.0, 3.0, 6.0, 5.0], "orange dress"),
([5.0, 6.0, 7.0, 8.0], "black shoes"),
]
items = items * items_sets
pipeline = client.pipeline()
for i, vec in enumerate(items):
vec, description = vec
mapping = {
"description": description,
"embedding": np.array(vec, dtype=np.float32).tobytes()
if not use_random_str_data
else vec,
"embedding-hnsw": np.array(vec, dtype=np.float32).tobytes()
if not use_random_str_data
else vec,
"price": 15 + i % 4,
"color": description.split(" ")[0],
"item_type": description.split(" ")[1],
"size": 10 + i % 3,
}
pipeline.hset(f"item:{i}", mapping=mapping)
await pipeline.execute() # Execute all at once
@staticmethod
def _convert_dict_values_to_str(list_of_dicts):
res = []
for d in list_of_dicts:
res_dict = {}
for k, v in d.items():
if isinstance(v, list):
res_dict[k] = [safe_str(x) for x in v]
else:
res_dict[k] = safe_str(v)
res.append(res_dict)
return res
@staticmethod
def compare_list_of_dicts(actual, expected):
assert len(actual) == len(expected), (
f"List of dicts length mismatch: {len(actual)} != {len(expected)}. "
f"Full dicts: actual:{actual}; expected:{expected}"
)
for expected_dict_item in expected:
found = False
for actual_dict_item in actual:
if actual_dict_item == expected_dict_item:
found = True
break
if not found:
assert False, (
f"Dict {expected_dict_item} not found in actual list of dicts: {actual}. "
f"All expected:{expected}"
)
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_basic_hybrid_search(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=5)
# set search query
search_query = HybridSearchQuery("@color:{red} @color:{green}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([-100, -200, -200, -300], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
res = await decoded_r.ft().hybrid_search(query=hybrid_query)
# the default results count limit is 10
if is_resp2_connection(decoded_r):
assert res.total_results == 10
assert len(res.results) == 10
assert res.warnings == []
assert res.execution_time > 0
assert all(isinstance(res.results[i]["__score"], bytes) for i in range(10))
assert all(isinstance(res.results[i]["__key"], bytes) for i in range(10))
else:
assert res["total_results"] == 10
assert len(res["results"]) == 10
assert res["warnings"] == []
assert res["execution_time"] > 0
assert all(isinstance(res["results"][i]["__score"], str) for i in range(10))
assert all(isinstance(res["results"][i]["__key"], str) for i in range(10))
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_scorer(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("shoes")
search_query.scorer("TFIDF")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 2, 3], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
combine_method = CombineResultsMethod(
CombinationMethods.LINEAR, ALPHA=1, BETA=0
)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load(
"@description", "@color", "@price", "@size", "@__score", "@__item"
)
postprocessing_config.limit(0, 2)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method,
post_processing=postprocessing_config,
timeout=10,
)
expected_results_tfidf = [
{
"description": b"red shoes",
"color": b"red",
"price": b"15",
"size": b"10",
"__score": b"2",
},
{
"description": b"green shoes with red laces",
"color": b"green",
"price": b"16",
"size": b"11",
"__score": b"2",
},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 2
assert len(res.results) == 2
assert res.results == expected_results_tfidf
assert res.warnings == []
else:
assert res["total_results"] >= 2
assert len(res["results"]) == 2
assert res["results"] == self._convert_dict_values_to_str(
expected_results_tfidf
)
assert res["warnings"] == []
search_query.scorer("BM25")
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method,
post_processing=postprocessing_config,
timeout=10,
)
expected_results_bm25 = [
{
"description": b"red shoes",
"color": b"red",
"price": b"15",
"size": b"10",
"__score": b"0.657894719299",
},
{
"description": b"green shoes with red laces",
"color": b"green",
"price": b"16",
"size": b"11",
"__score": b"0.657894719299",
},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 2
assert len(res.results) == 2
assert res.results == expected_results_bm25
assert res.warnings == []
else:
assert res["total_results"] >= 2
assert len(res["results"]) == 2
assert res["results"] == self._convert_dict_values_to_str(
expected_results_bm25
)
assert res["warnings"] == []
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_vsim_filter(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(
decoded_r, items_sets=5, use_random_str_data=True
)
search_query = HybridSearchQuery("@color:{missing}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data="abcd1234efgh5678",
)
vsim_query.filter(HybridFilter("@price:[15 16] @size:[10 11]"))
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@price", "@size")
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
if is_resp2_connection(decoded_r):
assert len(res.results) > 0
assert res.warnings == []
for item in res.results:
assert item["price"] in [b"15", b"16"]
assert item["size"] in [b"10", b"11"]
else:
assert len(res["results"]) > 0
assert res["warnings"] == []
for item in res["results"]:
assert item["price"] in ["15", "16"]
assert item["size"] in ["10", "11"]
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_vsim_knn(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
# this query won't have results, so we will be able to validate vsim results
search_query = HybridSearchQuery("@color:{none}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 2, 3], dtype=np.float32).tobytes(),
)
vsim_query.vsim_method_params(VectorSearchMethods.KNN, K=3)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
expected_results = [
{"__key": b"item:2", "__score": b"0.016393442623"},
{"__key": b"item:7", "__score": b"0.0161290322581"},
{"__key": b"item:12", "__score": b"0.015873015873"},
]
if is_resp2_connection(decoded_r):
assert res.total_results == 3 # KNN top-k value
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] == 3 # KNN top-k value
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
vsim_query_with_hnsw = HybridVsimQuery(
vector_field_name="@embedding-hnsw",
vector_data=np.array([1, 2, 2, 3], dtype=np.float32).tobytes(),
)
vsim_query_with_hnsw.vsim_method_params(
VectorSearchMethods.KNN, K=3, EF_RUNTIME=1
)
hybrid_query_with_hnsw = HybridQuery(search_query, vsim_query_with_hnsw)
res2 = await decoded_r.ft().hybrid_search(
query=hybrid_query_with_hnsw, timeout=10
)
expected_results2 = [
{"__key": b"item:12", "__score": b"0.016393442623"},
{"__key": b"item:22", "__score": b"0.0161290322581"},
{"__key": b"item:27", "__score": b"0.015873015873"},
]
if is_resp2_connection(decoded_r):
assert res2.total_results == 3 # KNN top-k value
assert len(res2.results) == 3
assert res2.results == expected_results2
assert res2.warnings == []
assert res2.execution_time > 0
else:
assert res2["total_results"] == 3 # KNN top-k value
assert len(res2["results"]) == 3
assert res2["results"] == self._convert_dict_values_to_str(
expected_results2
)
assert res2["warnings"] == []
assert res2["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_vsim_range(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
# this query won't have results, so we will be able to validate vsim results
search_query = HybridSearchQuery("@color:{none}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
vsim_query.vsim_method_params(VectorSearchMethods.RANGE, RADIUS=2)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.limit(0, 3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
expected_results = [
{"__key": b"item:2", "__score": b"0.016393442623"},
{"__key": b"item:7", "__score": b"0.0161290322581"},
{"__key": b"item:12", "__score": b"0.015873015873"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 3 # at least 3 results
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 3
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
vsim_query_with_hnsw = HybridVsimQuery(
vector_field_name="@embedding-hnsw",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
vsim_query_with_hnsw.vsim_method_params(
VectorSearchMethods.RANGE, RADIUS=2, EPSILON=0.5
)
hybrid_query_with_hnsw = HybridQuery(search_query, vsim_query_with_hnsw)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query_with_hnsw,
post_processing=postprocessing_config,
timeout=10,
)
expected_results_hnsw = [
{"__key": b"item:27", "__score": b"0.016393442623"},
{"__key": b"item:12", "__score": b"0.0161290322581"},
{"__key": b"item:22", "__score": b"0.015873015873"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 3
assert len(res.results) == 3
assert res.results == expected_results_hnsw
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 3
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(
expected_results_hnsw
)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_combine_all_score_aliases(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(
decoded_r, items_sets=1, use_random_str_data=True
)
search_query = HybridSearchQuery("shoes")
search_query.yield_score_as("search_score")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding-hnsw",
vector_data="abcd1234efgh5678",
vsim_search_method=VectorSearchMethods.KNN,
vsim_search_method_params={"K": 3, "EF_RUNTIME": 1},
yield_score_as="vsim_score",
)
hybrid_query = HybridQuery(search_query, vsim_query)
combine_method = CombineResultsMethod(
CombinationMethods.LINEAR,
ALPHA=0.5,
BETA=0.5,
YIELD_SCORE_AS="combined_score",
)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, combine_method=combine_method, timeout=10
)
if is_resp2_connection(decoded_r):
assert len(res.results) > 0
assert res.warnings == []
for item in res.results:
assert item["combined_score"] is not None
assert "__score" not in item
if item["__key"] in [b"item:0", b"item:1", b"item:4"]:
assert item["search_score"] is not None
else:
assert "search_score" not in item
if item["__key"] in [b"item:0", b"item:1", b"item:2"]:
assert item["vsim_score"] is not None
else:
assert "vsim_score" not in item
else:
assert len(res["results"]) > 0
assert res["warnings"] == []
for item in res["results"]:
assert item["combined_score"] is not None
assert "__score" not in item
if item["__key"] in ["item:0", "item:1", "item:4"]:
assert item["search_score"] is not None
else:
assert "search_score" not in item
if item["__key"] in ["item:0", "item:1", "item:2"]:
assert item["vsim_score"] is not None
else:
assert "vsim_score" not in item
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_combine(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
combine_method_linear = CombineResultsMethod(
CombinationMethods.LINEAR, ALPHA=0.5, BETA=0.5
)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.limit(0, 3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method_linear,
post_processing=postprocessing_config,
timeout=10,
)
expected_results = [
{"__key": b"item:2", "__score": b"0.166666666667"},
{"__key": b"item:7", "__score": b"0.166666666667"},
{"__key": b"item:12", "__score": b"0.166666666667"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 3
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 3
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
# combine with RRF and WINDOW + CONSTANT
combine_method_rrf = CombineResultsMethod(
CombinationMethods.RRF, WINDOW=3, CONSTANT=0.5
)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method_rrf,
post_processing=postprocessing_config,
timeout=10,
)
expected_results = [
{"__key": b"item:2", "__score": b"1.06666666667"},
{"__key": b"item:0", "__score": b"0.666666666667"},
{"__key": b"item:7", "__score": b"0.4"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 3
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 3
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
# combine with RRF, not all possible params provided
combine_method_rrf_2 = CombineResultsMethod(CombinationMethods.RRF, WINDOW=3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method_rrf_2,
post_processing=postprocessing_config,
timeout=10,
)
expected_results = [
{"__key": b"item:2", "__score": b"0.032522474881"},
{"__key": b"item:0", "__score": b"0.016393442623"},
{"__key": b"item:7", "__score": b"0.0161290322581"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 3
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 3
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red|green|black}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
combine_method = CombineResultsMethod(
CombinationMethods.LINEAR, ALPHA=0.5, BETA=0.5
)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load(
"@description", "@color", "@price", "@size", "@__key AS item_key"
)
postprocessing_config.limit(0, 1)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
combine_method=combine_method,
post_processing=postprocessing_config,
timeout=10,
)
expected_results = [
{
"description": b"red dress",
"color": b"red",
"price": b"17",
"size": b"12",
"item_key": b"item:2",
}
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 1
assert len(res.results) == 1
self.compare_list_of_dicts(res.results, expected_results)
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 1
assert len(res["results"]) == 1
self.compare_list_of_dicts(
res["results"], self._convert_dict_values_to_str(expected_results)
)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load_and_apply(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@color", "@price", "@size")
postprocessing_config.apply(
price_discount="@price - (@price * 0.1)",
tax_discount="@price_discount * 0.2",
)
postprocessing_config.limit(0, 3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
expected_results = [
{
"color": b"red",
"price": b"15",
"size": b"10",
"price_discount": b"13.5",
"tax_discount": b"2.7",
},
{
"color": b"red",
"price": b"17",
"size": b"12",
"price_discount": b"15.3",
"tax_discount": b"3.06",
},
{
"color": b"red",
"price": b"18",
"size": b"11",
"price_discount": b"16.2",
"tax_discount": b"3.24",
},
]
if is_resp2_connection(decoded_r):
assert len(res.results) == 3
self.compare_list_of_dicts(res.results, expected_results)
assert res.warnings == []
assert res.execution_time > 0
else:
assert len(res["results"]) == 3
self.compare_list_of_dicts(
res["results"], self._convert_dict_values_to_str(expected_results)
)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load_and_filter(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red|green|black}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@description", "@color", "@price", "@size")
# for the postprocessing filter we need to filter on the loaded fields
# expecting all of them to be interpreted as strings - the initial filed types
# are not preserved
postprocessing_config.filter(HybridFilter('@price=="15"'))
postprocessing_config.limit(0, 3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
if is_resp2_connection(decoded_r):
assert len(res.results) == 3
for item in res.results:
assert item["price"] == b"15"
assert res.warnings == []
assert res.execution_time > 0
else:
assert len(res["results"]) == 3
for item in res["results"]:
assert item["price"] == "15"
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load_apply_and_params(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(
decoded_r, items_sets=5, use_random_str_data=True
)
# set search query
search_query = HybridSearchQuery("@color:{$color_criteria}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data="$vector",
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@description", "@color", "@price")
postprocessing_config.apply(price_discount="@price - (@price * 0.1)")
postprocessing_config.limit(0, 3)
params_substitution = {
"vector": "abcd1234abcd5678",
"color_criteria": "red",
}
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
post_processing=postprocessing_config,
params_substitution=params_substitution,
timeout=10,
)
expected_results = [
{
"description": b"red shoes",
"color": b"red",
"price": b"15",
"price_discount": b"13.5",
},
{
"description": b"red dress",
"color": b"red",
"price": b"17",
"price_discount": b"15.3",
},
{
"description": b"red shoes",
"color": b"red",
"price": b"16",
"price_discount": b"14.4",
},
]
if is_resp2_connection(decoded_r):
assert len(res.results) == 3
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert len(res["results"]) == 3
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_limit(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.limit(0, 3)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
if is_resp2_connection(decoded_r):
assert len(res.results) == 3
assert res.warnings == []
else:
assert len(res["results"]) == 3
assert res["warnings"] == []
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load_apply_and_sortby(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=1)
# set search query
search_query = HybridSearchQuery("@color:{red|green}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@color", "@price")
postprocessing_config.apply(price_discount="@price - (@price * 0.1)")
postprocessing_config.sort_by(
SortbyField("@price_discount", asc=False), SortbyField("@color", asc=True)
)
postprocessing_config.limit(0, 5)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
expected_results = [
{"color": b"orange", "price": b"18", "price_discount": b"16.2"},
{"color": b"red", "price": b"17", "price_discount": b"15.3"},
{"color": b"green", "price": b"16", "price_discount": b"14.4"},
{"color": b"black", "price": b"15", "price_discount": b"13.5"},
{"color": b"red", "price": b"15", "price_discount": b"13.5"},
]
if is_resp2_connection(decoded_r):
assert res.total_results >= 5
assert len(res.results) == 5
# the order here should match because of the sort
assert res.results == expected_results
assert res.warnings == []
assert res.execution_time > 0
else:
assert res["total_results"] >= 5
assert len(res["results"]) == 5
# the order here should match because of the sort
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
assert res["execution_time"] > 0
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_timeout(self, decoded_r):
dim = 128
# Create index and add data
await self._create_hybrid_search_index(decoded_r, dim=dim)
await self._add_data_for_hybrid_search(
decoded_r,
items_sets=5000,
dim_for_random_data=dim,
use_random_str_data=True,
)
# set search query
search_query = HybridSearchQuery("*")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding-hnsw",
vector_data="abcd" * dim,
)
vsim_query.vsim_method_params(VectorSearchMethods.KNN, K=1000)
vsim_query.filter(
HybridFilter(
"((@price:[15 16] @size:[10 11]) | (@price:[13 15] @size:[11 12])) @description:(shoes) -@description:(green)"
)
)
hybrid_query = HybridQuery(search_query, vsim_query)
combine_method = CombineResultsMethod(CombinationMethods.RRF, WINDOW=1000)
timeout = 5000 # 5 second timeout
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, combine_method=combine_method, timeout=timeout
)
if is_resp2_connection(decoded_r):
assert len(res.results) > 0
assert res.warnings == []
assert res.execution_time > 0 and res.execution_time < timeout
else:
assert len(res["results"]) > 0
assert res["warnings"] == []
assert res["execution_time"] > 0 and res["execution_time"] < timeout
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, timeout=1
) # 1 ms timeout
if is_resp2_connection(decoded_r):
assert (
b"Timeout limit was reached (VSIM)" in res.warnings
or b"Timeout limit was reached (SEARCH)" in res.warnings
)
else:
assert (
"Timeout limit was reached (VSIM)" in res["warnings"]
or "Timeout limit was reached (SEARCH)" in res["warnings"]
)
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_load_and_groupby(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red|green}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@color", "@price", "@size", "@item_type")
postprocessing_config.limit(0, 4)
postprocessing_config.group_by(
["@price"],
reducers.count_distinct("@color").alias("colors_count"),
)
postprocessing_config.sort_by(SortbyField("@price", asc=True))
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=10
)
expected_results = [
{"price": b"15", "colors_count": b"2"},
{"price": b"16", "colors_count": b"2"},
{"price": b"17", "colors_count": b"2"},
{"price": b"18", "colors_count": b"2"},
]
if is_resp2_connection(decoded_r):
assert len(res.results) == 4
assert res.results == expected_results
assert res.warnings == []
else:
assert len(res["results"]) == 4
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
postprocessing_config = HybridPostProcessingConfig()
postprocessing_config.load("@color", "@price", "@size", "@item_type")
postprocessing_config.limit(0, 6)
postprocessing_config.sort_by(
SortbyField("@price", asc=True),
SortbyField("@item_type", asc=True),
)
postprocessing_config.group_by(
["@price", "@item_type"],
reducers.count_distinct("@color").alias("unique_colors_count"),
)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query, post_processing=postprocessing_config, timeout=1000
)
expected_results = [
{"price": b"15", "item_type": b"dress", "unique_colors_count": b"1"},
{"price": b"15", "item_type": b"shoes", "unique_colors_count": b"2"},
{"price": b"16", "item_type": b"dress", "unique_colors_count": b"1"},
{"price": b"16", "item_type": b"shoes", "unique_colors_count": b"2"},
{"price": b"17", "item_type": b"dress", "unique_colors_count": b"1"},
{"price": b"17", "item_type": b"shoes", "unique_colors_count": b"2"},
]
if is_resp2_connection(decoded_r):
assert len(res.results) == 6
assert res.results == expected_results
assert res.warnings == []
else:
assert len(res["results"]) == 6
assert res["results"] == self._convert_dict_values_to_str(expected_results)
assert res["warnings"] == []
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
async def test_hybrid_search_query_with_cursor(self, decoded_r):
# Create index and add data
await self._create_hybrid_search_index(decoded_r)
await self._add_data_for_hybrid_search(decoded_r, items_sets=10)
# set search query
search_query = HybridSearchQuery("@color:{red|green}")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([1, 2, 7, 6], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
res = await decoded_r.ft().hybrid_search(
query=hybrid_query,
cursor=HybridCursorQuery(count=5, max_idle=100),
timeout=10,
)
if is_resp2_connection(decoded_r):
assert isinstance(res, HybridCursorResult)
assert res.search_cursor_id > 0
assert res.vsim_cursor_id > 0
search_cursor = aggregations.Cursor(res.search_cursor_id)
vsim_cursor = aggregations.Cursor(res.vsim_cursor_id)
else:
assert res["SEARCH"] > 0
assert res["VSIM"] > 0
search_cursor = aggregations.Cursor(res["SEARCH"])
vsim_cursor = aggregations.Cursor(res["VSIM"])
search_res_from_cursor = await decoded_r.ft().aggregate(query=search_cursor)
if is_resp2_connection(decoded_r):
assert len(search_res_from_cursor.rows) == 5
else:
assert len(search_res_from_cursor[0]["results"]) == 5
vsim_res_from_cursor = await decoded_r.ft().aggregate(query=vsim_cursor)
if is_resp2_connection(decoded_r):
assert len(vsim_res_from_cursor.rows) == 5
else:
assert len(vsim_res_from_cursor[0]["results"]) == 5
| TestHybridSearch |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/clipboard/base.py | {
"start": 622,
"end": 1415
} | class ____(metaclass=ABCMeta):
"""
Abstract baseclass for clipboards.
(An implementation can be in memory, it can share the X11 or Windows
keyboard, or can be persistent.)
"""
@abstractmethod
def set_data(self, data: ClipboardData) -> None:
"""
Set data to the clipboard.
:param data: :class:`~.ClipboardData` instance.
"""
def set_text(self, text: str) -> None: # Not abstract.
"""
Shortcut for setting plain text on clipboard.
"""
self.set_data(ClipboardData(text))
def rotate(self) -> None:
"""
For Emacs mode, rotate the kill ring.
"""
@abstractmethod
def get_data(self) -> ClipboardData:
"""
Return clipboard data.
"""
| Clipboard |
python | ray-project__ray | python/ray/util/client/common.py | {
"start": 22074,
"end": 22996
} | class ____:
"""An ID generated by the client for objects not yet given an ObjectRef"""
def __init__(self, id: bytes):
assert len(id) != 0
self.id = id
@staticmethod
def generate_id() -> "ClientSideRefID":
tid = uuid.uuid4()
return ClientSideRefID(b"\xcc" + tid.bytes)
def remote_decorator(options: Optional[Dict[str, Any]]):
def decorator(function_or_class) -> ClientStub:
if inspect.isfunction(function_or_class) or is_cython(function_or_class):
return ClientRemoteFunc(function_or_class, options=options)
elif inspect.isclass(function_or_class):
return ClientActorClass(function_or_class, options=options)
else:
raise TypeError(
"The @ray.remote decorator must be applied to "
"either a function or to a class."
)
return decorator
@dataclass
| ClientSideRefID |
python | pennersr__django-allauth | allauth/socialaccount/providers/apple/provider.py | {
"start": 465,
"end": 1010
} | class ____(ProviderAccount):
def to_str(self):
email = self.account.extra_data.get("email")
if email and not email.lower().endswith("@privaterelay.appleid.com"):
return email
name = self.account.extra_data.get("name") or {}
if name.get("firstName") or name.get("lastName"):
full_name = f"{name['firstName'] or ''} {name['lastName'] or ''}"
full_name = full_name.strip()
if full_name:
return full_name
return super().to_str()
| AppleAccount |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 2705,
"end": 2962
} | class ____(ConverterAPI):
def is_convertible(self, value: Any) -> bool:
return isinstance(value, BaseAddress)
def convert(self, value: BaseAddress) -> int:
return self.conversion_manager.convert(value.address, int)
| AccountIntConverter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor28.py | {
"start": 512,
"end": 706
} | class ____(ParentB[T]):
def __init__(self, a: T) -> None: ...
def func2(arg1: ParentB[T], arg2: ParentB[T]) -> T: ...
# This should generate an error.
func2(ChildB(""), ChildB(1.2))
| ChildB |
python | django__django | django/core/mail/backends/filebased.py | {
"start": 253,
"end": 2353
} | class ____(ConsoleEmailBackend):
def __init__(self, *args, file_path=None, **kwargs):
self._fname = None
if file_path is not None:
self.file_path = file_path
else:
self.file_path = getattr(settings, "EMAIL_FILE_PATH", None)
self.file_path = os.path.abspath(self.file_path)
try:
os.makedirs(self.file_path, exist_ok=True)
except FileExistsError:
raise ImproperlyConfigured(
"Path for saving email messages exists, but is not a directory: %s"
% self.file_path
)
except OSError as err:
raise ImproperlyConfigured(
"Could not create directory for saving email messages: %s (%s)"
% (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured(
"Could not write to directory: %s" % self.file_path
)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs["stream"] = None
super().__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b"\n")
self.stream.write(b"-" * 79)
self.stream.write(b"\n")
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), "ab")
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
| EmailBackend |
python | ray-project__ray | rllib/algorithms/dqn/dqn_torch_policy.py | {
"start": 1411,
"end": 4403
} | class ____:
def __init__(
self,
q_t_selected: TensorType,
q_logits_t_selected: TensorType,
q_tp1_best: TensorType,
q_probs_tp1_best: TensorType,
importance_weights: TensorType,
rewards: TensorType,
done_mask: TensorType,
gamma=0.99,
n_step=1,
num_atoms=1,
v_min=-10.0,
v_max=10.0,
loss_fn=huber_loss,
):
if num_atoms > 1:
# Distributional Q-learning which corresponds to an entropy loss
z = torch.arange(0.0, num_atoms, dtype=torch.float32).to(rewards.device)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
# (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
r_tau = torch.unsqueeze(rewards, -1) + gamma**n_step * torch.unsqueeze(
1.0 - done_mask, -1
) * torch.unsqueeze(z, 0)
r_tau = torch.clamp(r_tau, v_min, v_max)
b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
lb = torch.floor(b)
ub = torch.ceil(b)
# Indispensable judgement which is missed in most implementations
# when b happens to be an integer, lb == ub, so pr_j(s', a*) will
# be discarded because (ub-b) == (b-lb) == 0.
floor_equal_ceil = ((ub - lb) < 0.5).float()
# (batch_size, num_atoms, num_atoms)
l_project = F.one_hot(lb.long(), num_atoms)
# (batch_size, num_atoms, num_atoms)
u_project = F.one_hot(ub.long(), num_atoms)
ml_delta = q_probs_tp1_best * (ub - b + floor_equal_ceil)
mu_delta = q_probs_tp1_best * (b - lb)
ml_delta = torch.sum(l_project * torch.unsqueeze(ml_delta, -1), dim=1)
mu_delta = torch.sum(u_project * torch.unsqueeze(mu_delta, -1), dim=1)
m = ml_delta + mu_delta
# Rainbow paper claims that using this cross entropy loss for
# priority is robust and insensitive to `prioritized_replay_alpha`
self.td_error = softmax_cross_entropy_with_logits(
logits=q_logits_t_selected, labels=m.detach()
)
self.loss = torch.mean(self.td_error * importance_weights)
self.stats = {
# TODO: better Q stats for dist dqn
}
else:
q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked
# compute the error (potentially clipped)
self.td_error = q_t_selected - q_t_selected_target.detach()
self.loss = torch.mean(importance_weights.float() * loss_fn(self.td_error))
self.stats = {
"mean_q": torch.mean(q_t_selected),
"min_q": torch.min(q_t_selected),
"max_q": torch.max(q_t_selected),
}
@OldAPIStack
| QLoss |
python | django__django | tests/admin_views/admin.py | {
"start": 13864,
"end": 14110
} | class ____(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
@admin.display
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
| LinkInline |
python | openai__openai-python | src/openai/resources/chat/completions/messages.py | {
"start": 7783,
"end": 8010
} | class ____:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.list = async_to_streamed_response_wrapper(
messages.list,
)
| AsyncMessagesWithStreamingResponse |
python | google__jax | tests/mosaic/gpu_test_multidevice.py | {
"start": 1294,
"end": 1943
} | class ____(parameterized.TestCase):
def setUp(self):
if not HAS_MOSAIC_GPU:
self.skipTest("jaxlib built without Mosaic GPU")
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_at_least("9.0")):
self.skipTest("Only works on GPU with capability >= sm90")
super().setUp()
self.prng = np.random.default_rng(1234)
self.context = mlir.make_ir_context()
if mgpu_dialect is not None:
mgpu_dialect.register_dialect(self.context)
self.enter_context(config.traceback_filtering("off"))
self.enter_context(self.context)
self.enter_context(ir.Location.unknown())
| TestCase |
python | pypa__pip | tests/functional/test_download.py | {
"start": 11754,
"end": 40652
} | class ____:
"""
"pip download --platform" downloads a .whl archive supported for
manylinux platforms.
"""
@pytest.mark.parametrize(
"platform",
[
"linux_x86_64",
"manylinux1_x86_64",
"manylinux2010_x86_64",
"manylinux2014_x86_64",
],
)
def test_download_universal(
self, platform: str, script: PipTestEnvironment, data: TestData
) -> None:
"""
Universal wheels are returned even for specific platforms.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
@pytest.mark.parametrize(
"wheel_abi,platform",
[
("manylinux1_x86_64", "manylinux1_x86_64"),
("manylinux1_x86_64", "manylinux2010_x86_64"),
("manylinux2010_x86_64", "manylinux2010_x86_64"),
("manylinux1_x86_64", "manylinux2014_x86_64"),
("manylinux2010_x86_64", "manylinux2014_x86_64"),
("manylinux2014_x86_64", "manylinux2014_x86_64"),
],
)
def test_download_compatible_manylinuxes(
self,
wheel_abi: str,
platform: str,
script: PipTestEnvironment,
data: TestData,
) -> None:
"""
Earlier manylinuxes are compatible with later manylinuxes.
"""
wheel = f"fake-1.0-py2.py3-none-{wheel_abi}.whl"
fake_wheel(data, wheel)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / wheel)
def test_explicit_platform_only(
self, data: TestData, script: PipTestEnvironment
) -> None:
"""
When specifying the platform, manylinux1 needs to be the
explicit platform--it won't ever be added to the compatible
tags.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-linux_x86_64.whl")
script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
def test_download__python_version(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"27",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"33",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-py2-none-any.whl")
fake_wheel(data, "fake-2.0-py3-none-any.whl")
# No py3 provided for version 1.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake==1.0",
expect_error=True,
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"26",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-2.0-py3-none-any.whl")
def make_wheel_with_python_requires(
script: PipTestEnvironment, package_name: str, python_requires: str
) -> Path:
"""
Create a wheel using the given python_requires.
:return: the path to the wheel file.
"""
package_dir = script.scratch_path / package_name
package_dir.mkdir()
text = textwrap.dedent(
"""\
from setuptools import setup
setup(name='{}',
python_requires='{}',
version='1.0')
"""
).format(package_name, python_requires)
package_dir.joinpath("setup.py").write_text(text)
script.run(
"python",
"setup.py",
"bdist_wheel",
"--universal",
cwd=package_dir,
)
file_name = f"{package_name}-1.0-py2.py3-none-any.whl"
return package_dir / "dist" / file_name
def test_download__python_version_used_for_python_requires(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Test that --python-version is used for the Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==3.2",
)
wheel_dir = os.path.dirname(wheel_path)
def make_args(python_version: str) -> list[str]:
return [
"download",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
python_version,
"mypackage==1.0",
]
args = make_args("33")
result = script.pip(*args, expect_error=True)
expected_err = (
"ERROR: Package 'mypackage' requires a different Python: 3.3.0 not in '==3.2'"
)
assert expected_err in result.stderr, f"stderr: {result.stderr}"
# Now try with a --python-version that satisfies the Requires-Python.
args = make_args("32")
script.pip(*args) # no exception
def test_download_ignore_requires_python_dont_fail_with_wrong_python(
script: PipTestEnvironment,
) -> None:
"""
Test that --ignore-requires-python ignores Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==999",
)
wheel_dir = os.path.dirname(wheel_path)
result = script.pip(
"download",
"--ignore-requires-python",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"mypackage==1.0",
)
result.did_create(Path("scratch") / "mypackage-1.0-py2.py3-none-any.whl")
def test_download_specify_abi(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"fake_abi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"none",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--abi",
"cp27m",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"none",
"fake",
expect_error=True,
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-otherabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"--abi",
"otherabi",
"--abi",
"none",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-otherabi-fake_platform.whl")
def test_download_specify_implementation(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
data.reset()
fake_wheel(data, "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"2",
"fake",
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(
script: PipTestEnvironment,
) -> None:
"""
Test download exit status code when no requirements specified
"""
result = script.pip("download", expect_error=True)
assert "You must give at least one requirement to download" in result.stderr
assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(
script: PipTestEnvironment,
) -> None:
"""
Test download exit status code when blank requirements file specified
"""
script.scratch_path.joinpath("blank.txt").write_text("\n")
script.pip("download", "-r", "blank.txt")
def test_download_prefer_binary_when_tarball_higher_than_wheel(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_tarball_higher_than_wheel_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"--no-index",
"-f",
data.packages,
"-d",
".",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
source>0.9
"""
)
)
result = script.pip(
"download",
"--no-build-isolation",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_prefer_binary_when_wheel_doesnt_satisfy_req_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source>0.9
"""
)
)
result = script.pip(
"download",
"--no-build-isolation",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_download_prefer_binary_when_only_tarball_exists(
script: PipTestEnvironment, data: TestData
) -> None:
result = script.pip(
"download",
"--no-build-isolation",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_when_only_tarball_exists_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"--no-build-isolation",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
@pytest.fixture(scope="session")
def shared_script(
tmpdir_factory: pytest.TempPathFactory, script_factory: ScriptFactory
) -> PipTestEnvironment:
tmpdir = tmpdir_factory.mktemp("download_shared_script")
script = script_factory(tmpdir.joinpath("workspace"))
return script
def test_download_file_url(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
shared_script.pip(
"download",
"--no-build-isolation",
"-d",
str(download_dir),
"--no-index",
simple_pkg.as_uri(),
)
assert downloaded_path.exists()
assert simple_pkg.read_bytes() == downloaded_path.read_bytes()
def test_download_file_url_existing_ok_download(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
downloaded_path_bytes = downloaded_path.read_bytes()
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
url = f"{simple_pkg.as_uri()}#sha256={sha256(downloaded_path_bytes).hexdigest()}"
shared_script.pip(
"download",
"--no-build-isolation",
"-d",
str(download_dir),
url,
"--disable-pip-version-check",
)
assert downloaded_path_bytes == downloaded_path.read_bytes()
def test_download_file_url_existing_bad_download(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
url = f"{simple_pkg.as_uri()}#sha256={sha256(simple_pkg_bytes).hexdigest()}"
result = shared_script.pip(
"download",
"--no-build-isolation",
"-d",
str(download_dir),
url,
allow_stderr_warning=True, # bad hash
)
assert simple_pkg_bytes == downloaded_path.read_bytes()
assert "WARNING: Previously-downloaded file" in result.stderr
assert "has bad hash. Re-downloading." in result.stderr
def test_download_http_url_bad_hash(
shared_script: PipTestEnvironment,
shared_data: TestData,
tmpdir: Path,
mock_server: MockServer,
) -> None:
"""
If already-downloaded file has bad checksum, re-download.
"""
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
digest = sha256(simple_pkg_bytes).hexdigest()
mock_server.set_responses([file_response(simple_pkg)])
mock_server.start()
base_address = f"http://{mock_server.host}:{mock_server.port}"
url = f"{base_address}/simple-1.0.tar.gz#sha256={digest}"
result = shared_script.pip(
"download",
"--no-build-isolation",
"-d",
str(download_dir),
url,
allow_stderr_warning=True, # bad hash
)
assert simple_pkg_bytes == downloaded_path.read_bytes()
assert "WARNING: Previously-downloaded file" in result.stderr
assert "has bad hash. Re-downloading." in result.stderr
mock_server.stop()
requests = mock_server.get_requests()
assert len(requests) == 1
assert requests[0]["PATH_INFO"] == "/simple-1.0.tar.gz"
assert requests[0]["HTTP_ACCEPT_ENCODING"] == "identity"
def test_download_editable(
script: PipTestEnvironment, data: TestData, tmpdir: Path
) -> None:
"""
Test 'pip download' of editables in requirement file.
"""
editable_path = str(data.src / "simplewheel-1.0").replace(os.path.sep, "/")
requirements_path = tmpdir / "requirements.txt"
requirements_path.write_text("-e " + editable_path + "\n")
download_dir = tmpdir / "download_dir"
script.pip(
"download",
"--no-build-isolation",
"--no-deps",
"-r",
str(requirements_path),
"-d",
str(download_dir),
)
downloads = os.listdir(download_dir)
assert len(downloads) == 1
assert downloads[0].endswith(".zip")
@pytest.fixture
def download_local_html_index(
script: PipTestEnvironment,
html_index_for_packages: Path,
tmpdir: Path,
) -> Callable[..., tuple[TestPipResult, Path]]:
"""Execute `pip download` against a generated PyPI index."""
download_dir = tmpdir / "download_dir"
def run_for_generated_index(
args: list[str],
allow_error: bool = False,
) -> tuple[TestPipResult, Path]:
"""
Produce a PyPI directory structure pointing to the specified packages, then
execute `pip download -i ...` pointing to our generated index.
"""
pip_args = [
"download",
"--no-build-isolation",
"-d",
str(download_dir),
"-i",
path_to_url(str(html_index_for_packages)),
*args,
]
result = script.pip(*pip_args, allow_error=allow_error)
return (result, download_dir)
return run_for_generated_index
@pytest.fixture
def download_server_html_index(
script: PipTestEnvironment,
tmpdir: Path,
html_index_with_onetime_server: http.server.ThreadingHTTPServer,
) -> Callable[..., tuple[TestPipResult, Path]]:
"""Execute `pip download` against a generated PyPI index."""
download_dir = tmpdir / "download_dir"
def run_for_generated_index(
args: list[str],
allow_error: bool = False,
) -> tuple[TestPipResult, Path]:
"""
Produce a PyPI directory structure pointing to the specified packages, then
execute `pip download -i ...` pointing to our generated index.
"""
pip_args = [
"download",
"--no-build-isolation",
"-d",
str(download_dir),
"-i",
"http://localhost:8000",
*args,
]
result = script.pip(*pip_args, allow_error=allow_error)
return (result, download_dir)
return run_for_generated_index
@pytest.mark.parametrize(
"requirement_to_download, expected_outputs",
[
("simple2==1.0", ["simple-1.0.tar.gz", "simple2-1.0.tar.gz"]),
("simple==2.0", ["simple-2.0.tar.gz"]),
(
"colander",
["colander-0.9.9-py2.py3-none-any.whl", "translationstring-1.1.tar.gz"],
),
(
"compilewheel",
["compilewheel-1.0-py2.py3-none-any.whl", "simple-1.0.tar.gz"],
),
],
)
def test_download_metadata(
download_local_html_index: Callable[..., tuple[TestPipResult, Path]],
requirement_to_download: str,
expected_outputs: list[str],
) -> None:
"""Verify that if a data-dist-info-metadata attribute is present, then it is used
instead of the actual dist's METADATA."""
_, download_dir = download_local_html_index(
[requirement_to_download],
)
assert sorted(os.listdir(download_dir)) == expected_outputs
@pytest.mark.parametrize(
"requirement_to_download, expected_outputs, doubled_path",
[
(
"simple2==1.0",
["simple-1.0.tar.gz", "simple2-1.0.tar.gz"],
"/simple2/simple2-1.0.tar.gz",
),
("simple==2.0", ["simple-2.0.tar.gz"], "/simple/simple-2.0.tar.gz"),
(
"colander",
["colander-0.9.9-py2.py3-none-any.whl", "translationstring-1.1.tar.gz"],
"/colander/colander-0.9.9-py2.py3-none-any.whl",
),
(
"compilewheel",
[
"compilewheel-1.0-py2.py3-none-any.whl",
"simple-1.0.tar.gz",
],
"/compilewheel/compilewheel-1.0-py2.py3-none-any.whl",
),
],
)
def test_download_metadata_server(
download_server_html_index: Callable[..., tuple[TestPipResult, Path]],
requirement_to_download: str,
expected_outputs: list[str],
doubled_path: str,
) -> None:
"""Verify that if a data-dist-info-metadata attribute is present, then it is used
instead of the actual dist's METADATA.
Additionally, verify that each dist is downloaded exactly once using a mock server.
This is a regression test for issue https://github.com/pypa/pip/issues/11847.
"""
_, download_dir = download_server_html_index(
[requirement_to_download, "--no-cache-dir"],
)
assert sorted(os.listdir(download_dir)) == expected_outputs
shutil.rmtree(download_dir)
result, _ = download_server_html_index(
[requirement_to_download, "--no-cache-dir"],
allow_error=True,
)
assert result.returncode != 0
expected_msg = f"File {doubled_path} not available more than once!"
assert expected_msg in result.stderr
@pytest.mark.parametrize(
"requirement_to_download, real_hash",
[
(
"simple==3.0",
"95e0f200b6302989bcf2cead9465cf229168295ea330ca30d1ffeab5c0fed996",
),
(
"has-script",
"16ba92d7f6f992f6de5ecb7d58c914675cf21f57f8e674fb29dcb4f4c9507e5b",
),
],
)
def test_incorrect_metadata_hash(
download_local_html_index: Callable[..., tuple[TestPipResult, Path]],
requirement_to_download: str,
real_hash: str,
) -> None:
"""Verify that if a hash for data-dist-info-metadata is provided, it must match the
actual hash of the metadata file."""
result, _ = download_local_html_index(
[requirement_to_download],
allow_error=True,
)
assert result.returncode != 0
expected_msg = f"""\
Expected sha256 wrong-hash
Got {real_hash}"""
assert expected_msg in result.stderr
@pytest.mark.parametrize(
"requirement_to_download, expected_url",
[
("simple2==2.0", "simple2-2.0.tar.gz.metadata"),
("priority", "priority-1.0-py2.py3-none-any.whl.metadata"),
],
)
def test_metadata_not_found(
download_local_html_index: Callable[..., tuple[TestPipResult, Path]],
requirement_to_download: str,
expected_url: str,
) -> None:
"""Verify that if a data-dist-info-metadata attribute is provided, that pip will
fetch the .metadata file at the location specified by PEP 658, and error
if unavailable."""
result, _ = download_local_html_index(
[requirement_to_download],
allow_error=True,
)
assert result.returncode != 0
expected_re = re.escape(expected_url)
pattern = re.compile(
f"ERROR: 404 Client Error: FileNotFoundError for url:.*{expected_re}"
)
assert pattern.search(result.stderr), (pattern, result.stderr)
def test_produces_error_for_mismatched_package_name_in_metadata(
download_local_html_index: Callable[..., tuple[TestPipResult, Path]],
) -> None:
"""Verify that the package name from the metadata matches the requested package."""
result, _ = download_local_html_index(
["simple2==3.0"],
allow_error=True,
)
assert result.returncode != 0
assert (
"simple2-3.0.tar.gz has inconsistent Name: expected 'simple2', but metadata "
"has 'not-simple2'"
) in result.stdout
@pytest.mark.parametrize(
"requirement",
[
"requires-simple-extra==0.1",
"REQUIRES_SIMPLE-EXTRA==0.1",
"REQUIRES....simple-_-EXTRA==0.1",
],
)
def test_canonicalizes_package_name_before_verifying_metadata(
download_local_html_index: Callable[..., tuple[TestPipResult, Path]],
requirement: str,
) -> None:
"""Verify that the package name from the command line and the package's
METADATA are both canonicalized before comparison.
Regression test for https://github.com/pypa/pip/issues/12038
"""
result, download_dir = download_local_html_index(
[requirement],
allow_error=True,
)
assert result.returncode == 0
assert os.listdir(download_dir) == [
"requires_simple_extra-0.1-py2.py3-none-any.whl",
]
| TestDownloadPlatformManylinuxes |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py | {
"start": 23794,
"end": 28859
class ____(LearningRateSchedule):
  """A LearningRateSchedule that uses a cosine decay schedule with restarts.
  See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
  SGDR: Stochastic Gradient Descent with Warm Restarts.
  When training a model, it is often useful to lower the learning rate as
  the training progresses. This schedule applies a cosine decay function with
  restarts to an optimizer step, given a provided initial learning rate.
  It requires a `step` value to compute the decayed learning rate. You can
  just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
  rate when passed the current optimizer step. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  The learning rate multiplier first decays
  from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
  restart is performed. Each new warm restart runs for `t_mul` times more
  steps and with `m_mul` times smaller initial learning rate.
  Example usage:
  ```python
  first_decay_steps = 1000
  lr_decayed_fn = (
    tf.keras.optimizers.schedules.CosineDecayRestarts(
        initial_learning_rate,
        first_decay_steps))
  ```
  You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
  as the learning rate. The learning rate schedule is also serializable and
  deserializable using `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.
  Returns:
    A 1-arg callable learning rate schedule that takes the current optimizer
    step and outputs the decayed learning rate, a scalar `Tensor` of the same
    type as `initial_learning_rate`.
  """
  def __init__(
      self,
      initial_learning_rate,
      first_decay_steps,
      t_mul=2.0,
      m_mul=1.0,
      alpha=0.0,
      name=None):
    """Applies cosine decay with restarts to the learning rate.
    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
        number. The initial learning rate.
      first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number. Number of steps to decay over.
      t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the number of iterations in the i-th period
      m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the initial learning rate of the i-th period:
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of the initial_learning_rate.
      name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
    """
    super(CosineDecayRestarts, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.first_decay_steps = first_decay_steps
    # Private: exposed via get_config() under the public names t_mul/m_mul.
    self._t_mul = t_mul
    self._m_mul = m_mul
    self.alpha = alpha
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "SGDRDecay") as name:
      initial_learning_rate = (
          tensor_conversion.convert_to_tensor_v2_with_dispatch(
              self.initial_learning_rate, name="initial_learning_rate"
          )
      )
      dtype = initial_learning_rate.dtype
      # All hyperparameters are cast to the learning rate's dtype so the
      # arithmetic below stays in one precision.
      first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
      alpha = math_ops.cast(self.alpha, dtype)
      t_mul = math_ops.cast(self._t_mul, dtype)
      m_mul = math_ops.cast(self._m_mul, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      # Progress measured in units of the first decay period.
      completed_fraction = global_step_recomp / first_decay_steps
      def compute_step(completed_fraction, geometric=False):
        """Helper for `cond` operation."""
        # Maps overall progress to (restart index, fraction within that
        # restart). With t_mul != 1 the period lengths grow geometrically,
        # so the restart index comes from a logarithm; with t_mul == 1 all
        # periods have equal length and floor/remainder suffice.
        if geometric:
          i_restart = math_ops.floor(
              math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
              math_ops.log(t_mul))
          sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
          completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
        else:
          i_restart = math_ops.floor(completed_fraction)
          completed_fraction -= i_restart
        return i_restart, completed_fraction
      i_restart, completed_fraction = cond.cond(
          math_ops.equal(t_mul, 1.0),
          lambda: compute_step(completed_fraction, geometric=False),
          lambda: compute_step(completed_fraction, geometric=True))
      # Each restart starts from a learning rate scaled down by m_mul**i.
      m_fac = m_mul**i_restart
      cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
          constant_op.constant(math.pi) * completed_fraction))
      # Rescale so the decayed multiplier never drops below alpha.
      decayed = (1 - alpha) * cosine_decayed + alpha
      return math_ops.multiply(initial_learning_rate, decayed, name=name)
  def get_config(self):
    # Serialization support for tf.keras.optimizers.schedules.serialize().
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "first_decay_steps": self.first_decay_steps,
        "t_mul": self._t_mul,
        "m_mul": self._m_mul,
        "alpha": self.alpha,
        "name": self.name
    }
# Note: this code is still used by V1 APIs.
| CosineDecayRestarts |
python | rq__rq | rq/intermediate_queue.py | {
"start": 230,
"end": 3826
class ____:
    """Wrapper around the Redis list that holds jobs in transit between a
    queue and the worker that picked them up (the "intermediate" queue)."""

    def __init__(self, queue_key: str, connection: Redis):
        # Derive this queue's intermediate key from the source queue's key.
        self.queue_key = queue_key
        self.key = self.get_intermediate_queue_key(queue_key)
        self.connection = connection

    @classmethod
    def get_intermediate_queue_key(cls, queue_key: str) -> str:
        """Return the intermediate queue key derived from ``queue_key``."""
        return f'{queue_key}:intermediate'

    def get_first_seen_key(self, job_id: str) -> str:
        """Return the Redis key that tracks when ``job_id`` was first seen."""
        return f'{self.key}:first_seen:{job_id}'

    def set_first_seen(self, job_id: str) -> bool:
        """Record the first-seen timestamp for ``job_id``.

        Returns True only when the key did not exist before (``nx=True``);
        the key expires after 24 hours.
        """
        # TODO: job_id should be changed to execution ID in 2.0
        was_set = self.connection.set(
            self.get_first_seen_key(job_id), now().timestamp(), nx=True, ex=3600 * 24
        )
        return bool(was_set)

    def get_first_seen(self, job_id: str) -> Optional[datetime]:
        """Return when ``job_id`` was first seen here, or None if unrecorded."""
        raw = self.connection.get(self.get_first_seen_key(job_id))
        return datetime.fromtimestamp(float(raw), tz=timezone.utc) if raw else None

    def should_be_cleaned_up(self, job_id: str) -> bool:
        """Whether ``job_id`` has lingered in the intermediate queue too long.

        A job qualifies once it has been here for more than one minute.
        """
        # TODO: should be changed to execution ID in 2.0
        seen_at = self.get_first_seen(job_id)
        return bool(seen_at) and now() - seen_at > timedelta(minutes=1)

    def get_job_ids(self) -> list[str]:
        """Return all job IDs currently sitting in the intermediate queue."""
        return [entry.decode() for entry in self.connection.lrange(self.key, 0, -1)]

    def remove(self, job_id: str) -> None:
        """Remove one occurrence of ``job_id`` from the intermediate queue."""
        self.connection.lrem(self.key, 1, job_id)

    def cleanup(self, worker: 'BaseWorker', queue: 'Queue') -> None:
        """Fail or drop jobs that appear to be stuck in the intermediate queue."""
        for job_id in self.get_job_ids():
            job = queue.fetch_job(job_id)
            if job_id in queue.started_job_registry:
                # Actively running — nothing to clean up.
                continue
            if not job:
                # The job no longer exists in the queue, so the intermediate
                # entry can safely be dropped.
                self.remove(job_id)
                continue
            if self.set_first_seen(job_id):
                # First sighting of this job: give it time before acting.
                continue
            if self.should_be_cleaned_up(job_id):
                worker.handle_job_failure(job, queue, exc_string='Job was stuck in intermediate queue.')
                self.remove(job_id)
| IntermediateQueue |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 96248,
"end": 125604
class ____:
    """Tests for Bessel/Thomson analog filter design (`bessel`, `besselap`).

    The ``xp`` fixture parametrizes most tests over array API backends.
    """
    def test_degenerate(self, xp):
        # Orders 0 and 1 are degenerate: identical for every normalization.
        for norm in ('delay', 'phase', 'mag'):
            # 0-order filter is just a passthrough
            b, a = bessel(0, xp.asarray(1), analog=True, norm=norm)
            xp_assert_equal(b, xp.asarray([1.0], dtype=xp.float64))
            xp_assert_equal(a, xp.asarray([1.0], dtype=xp.float64))
            # 1-order filter is same for all types
            b, a = bessel(1, xp.asarray(1.), analog=True, norm=norm)
            xp_assert_close(b, xp.asarray([1.0], dtype=xp.float64), rtol=1e-15)
            xp_assert_close(a, xp.asarray([1.0, 1], dtype=xp.float64), rtol=1e-15)
            z, p, k = bessel(1, xp.asarray(0.3), analog=True, output='zpk', norm=norm)
            xp_assert_equal(z, xp.asarray([], dtype=xp.float64))
            xp_assert_close(
                p, xp.asarray([-0.3+0j], dtype=xp.complex128),
                rtol=1e-14 if not DEFAULT_F32 else 1e-7
            )
            assert math.isclose(
                k, 0.3, rel_tol=1e-14 if not DEFAULT_F32 else 1e-6
            )
    @pytest.mark.xfail(reason="Failing in mypy workflow - see gh-23902")
    def test_high_order(self, xp):
        # Only the upper-half-plane poles are listed; the conjugate pairs are
        # reconstructed with np.union1d/np.conj before comparing.
        # high even order, 'phase'
        z, p, k = bessel(24, xp.asarray(100), analog=True, output='zpk')
        z2 = xp.asarray([], dtype=xp.float64)
        p2 = [
            -9.055312334014323e+01 + 4.844005815403969e+00j,
            -8.983105162681878e+01 + 1.454056170018573e+01j,
            -8.837357994162065e+01 + 2.426335240122282e+01j,
            -8.615278316179575e+01 + 3.403202098404543e+01j,
            -8.312326467067703e+01 + 4.386985940217900e+01j,
            -7.921695461084202e+01 + 5.380628489700191e+01j,
            -7.433392285433246e+01 + 6.388084216250878e+01j,
            -6.832565803501586e+01 + 7.415032695116071e+01j,
            -6.096221567378025e+01 + 8.470292433074425e+01j,
            -5.185914574820616e+01 + 9.569048385258847e+01j,
            -4.027853855197555e+01 + 1.074195196518679e+02j,
            -2.433481337524861e+01 + 1.207298683731973e+02j,
        ]
        p2 = np.union1d(p2, np.conj(p2))
        p2 = xp.asarray(p2, dtype=xp.complex128)
        k2 = 9.999999999999989e+47
        xp_assert_equal(z, z2)
        xp_assert_close(_sort_cmplx(p, xp=xp),
                        _sort_cmplx(p2, xp=xp))
        assert math.isclose(k, k2, rel_tol=1e-14)
        # high odd order, 'phase'
        z, p, k = bessel(23, xp.asarray(1000.), analog=True, output='zpk')
        z2 = xp.asarray([], dtype=xp.float64)
        p2 = [
            -2.497697202208956e+02 + 1.202813187870698e+03j,
            -4.126986617510172e+02 + 1.065328794475509e+03j,
            -5.304922463809596e+02 + 9.439760364018479e+02j,
            -9.027564978975828e+02 + 1.010534334242318e+02j,
            -8.909283244406079e+02 + 2.023024699647598e+02j,
            -8.709469394347836e+02 + 3.039581994804637e+02j,
            -8.423805948131370e+02 + 4.062657947488952e+02j,
            -8.045561642249877e+02 + 5.095305912401127e+02j,
            -7.564660146766259e+02 + 6.141594859516342e+02j,
            -6.965966033906477e+02 + 7.207341374730186e+02j,
            -6.225903228776276e+02 + 8.301558302815096e+02j,
            -9.066732476324988e+02]
        p2 = np.union1d(p2, np.conj(p2))
        p2 = xp.asarray(p2, dtype=xp.complex128)
        k2 = 9.999999999999983e+68
        xp_assert_equal(z, z2)
        xp_assert_close(_sort_cmplx(p, xp=xp),
                        _sort_cmplx(p2, xp=xp))
        assert math.isclose(k, k2, rel_tol=1e-14)
        # high even order, 'delay' (Orchard 1965 "The Roots of the
        # Maximally Flat-Delay Polynomials" Table 1)
        z, p, k = bessel(31, xp.asarray(1.), analog=True, output='zpk', norm='delay')
        p2 = [-20.876706,
              -20.826543 + 1.735732j,
              -20.675502 + 3.473320j,
              -20.421895 + 5.214702j,
              -20.062802 + 6.961982j,
              -19.593895 + 8.717546j,
              -19.009148 + 10.484195j,
              -18.300400 + 12.265351j,
              -17.456663 + 14.065350j,
              -16.463032 + 15.889910j,
              -15.298849 + 17.746914j,
              -13.934466 + 19.647827j,
              -12.324914 + 21.610519j,
              -10.395893 + 23.665701j,
              - 8.005600 + 25.875019j,
              - 4.792045 + 28.406037j,
              ]
        p2 = np.union1d(p2, np.conj(p2))
        p2 = xp.asarray(p2, dtype=xp.complex128)
        xp_assert_close(_sort_cmplx(p, xp=xp),
                        _sort_cmplx(p2, xp=xp))
        # high odd order, 'delay'
        z, p, k = bessel(30, xp.asarray(1.), analog=True, output='zpk', norm='delay')
        p2 = [-20.201029 + 0.867750j,
              -20.097257 + 2.604235j,
              -19.888485 + 4.343721j,
              -19.572188 + 6.088363j,
              -19.144380 + 7.840570j,
              -18.599342 + 9.603147j,
              -17.929195 + 11.379494j,
              -17.123228 + 13.173901j,
              -16.166808 + 14.992008j,
              -15.039580 + 16.841580j,
              -13.712245 + 18.733902j,
              -12.140295 + 20.686563j,
              -10.250119 + 22.729808j,
              - 7.901170 + 24.924391j,
              - 4.734679 + 27.435615j,
              ]
        p2 = np.union1d(p2, np.conj(p2))
        p2 = xp.asarray(p2, dtype=xp.complex128)
        xp_assert_close(_sort_cmplx(p, xp=xp),
                        _sort_cmplx(p2, xp=xp))
    def test_refs(self, xp):
        """Compare against published reference tables (Bond; Rane note 147)."""
        # Compare to http://www.crbond.com/papers/bsf2.pdf
        # "Delay Normalized Bessel Polynomial Coefficients"
        bond_b = xp.asarray([10395.0], dtype=xp.float64)
        bond_a = xp.asarray([1.0, 21, 210, 1260, 4725, 10395, 10395], dtype=xp.float64)
        b, a = bessel(6, xp.asarray(1.0), norm='delay', analog=True)
        xp_assert_close(b, bond_b)
        xp_assert_close(a, bond_a)
        # "Delay Normalized Bessel Pole Locations"
        bond_poles = {
            1: [-1.0000000000],
            2: [-1.5000000000 + 0.8660254038j],
            3: [-1.8389073227 + 1.7543809598j, -2.3221853546],
            4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j],
            5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j,
                -3.6467385953],
            6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j,
                -4.2483593959 + 0.8675096732j],
            7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j,
                -4.7582905282 + 1.7392860611j, -4.9717868585],
            8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j,
                -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j],
            9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j,
                -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j,
                -6.2970191817],
            10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j,
                 -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j,
                 -6.9220449054 + 0.8676651955j]
        }
        for N in range(1, 11):
            p1 = np.sort(bond_poles[N])
            z, p, k = besselap(N, 'delay', xp=xp)
            assert array_namespace(z) == array_namespace(p) == xp
            p2 = np.sort(np.concatenate(_cplxreal(_xp_copy_to_numpy(p))))
            assert_array_almost_equal(xp.asarray(p2), xp.asarray(p1), decimal=10)
        # "Frequency Normalized Bessel Pole Locations"
        bond_poles = {
            1: [-1.0000000000],
            2: [-1.1016013306 + 0.6360098248j],
            3: [-1.0474091610 + 0.9992644363j, -1.3226757999],
            4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j],
            5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j,
                -1.5023162714],
            6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j,
                -1.5714904036 + 0.3208963742j],
            7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j,
                -1.6120387662 + 0.5892445069j, -1.6843681793],
            8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j,
                -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j],
            9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j,
                -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j,
                -1.8566005012],
            10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j,
                 -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j,
                 -1.9276196914 + 0.2416234710j]
        }
        for N in range(1, 11):
            p1 = np.sort(bond_poles[N])
            z, p, k = besselap(N, 'mag', xp=xp)
            assert array_namespace(z) == array_namespace(p) == xp
            p2 = np.sort(np.concatenate(_cplxreal(_xp_copy_to_numpy(p))))
            assert_array_almost_equal(xp.asarray(p2), xp.asarray(p1), decimal=10)
        # Compare to https://www.ranecommercial.com/legacy/note147.html
        # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order"
        a = xp.asarray([1, 1, 1/3], dtype=xp.float64)
        b2, a2 = bessel(2, xp.asarray(1.), norm='delay', analog=True)
        xp_assert_close(a2/b2, xp.flip(a))
        a = xp.asarray([1, 1, 2/5, 1/15], dtype=xp.float64)
        b2, a2 = bessel(3, xp.asarray(1.), norm='delay', analog=True)
        xp_assert_close(a2/b2, xp.flip(a))
        a = xp.asarray([1, 1, 9/21, 2/21, 1/105], dtype=xp.float64)
        b2, a2 = bessel(4, xp.asarray(1.), norm='delay', analog=True)
        xp_assert_close(a2/b2, xp.flip(a))
        a = xp.asarray([1, math.sqrt(3), 1], dtype=xp.float64)
        b2, a2 = bessel(2, xp.asarray(1.), norm='phase', analog=True)
        xp_assert_close(a2/b2, xp.flip(a))
        # TODO: Why so inaccurate? Is reference flawed?
        a = xp.asarray([1, 2.481, 2.463, 1.018], dtype=xp.float64)
        b2, a2 = bessel(3, xp.asarray(1.), norm='phase', analog=True)
        assert_array_almost_equal(a2/b2, xp.flip(a), decimal=1)
        # TODO: Why so inaccurate? Is reference flawed?
        a = xp.asarray([1, 3.240, 4.5, 3.240, 1.050], dtype=xp.float64)
        b2, a2 = bessel(4, xp.asarray(1.), norm='phase', analog=True)
        assert_array_almost_equal(a2/b2, xp.flip(a), decimal=1)
        # Table of -3 dB factors:
        N, scale = 2, xp.asarray([1.272, 1.272], dtype=xp.complex128)
        scale2 = besselap(N, 'mag', xp=xp)[1] / besselap(N, 'phase', xp=xp)[1]
        assert_array_almost_equal(scale2, scale, decimal=3)
        # TODO: Why so inaccurate? Is reference flawed?
        N, scale = 3, xp.asarray([1.413, 1.413, 1.413], dtype=xp.complex128)
        scale2 = besselap(N, 'mag', xp=xp)[1] / besselap(N, 'phase', xp=xp)[1]
        assert_array_almost_equal(scale2, scale, decimal=2)
        # TODO: Why so inaccurate? Is reference flawed?
        N, scale = 4, xp.asarray([1.533]*4, dtype=xp.complex128)
        scale2 = besselap(N, 'mag', xp=xp)[1] / besselap(N, 'phase', xp=xp)[1]
        assert_array_almost_equal(scale2, scale, decimal=1)
    @pytest.mark.xfail(reason="Failing in mypy workflow - see gh-23902")
    def test_hardcoded(self, xp):
        # Compare to values from original hardcoded implementation
        # (upper-half-plane poles only; conjugates reconstructed below).
        originals = {
            0: [],
            1: [-1],
            2: [-.8660254037844386467637229 + .4999999999999999999999996j],
            3: [-.9416000265332067855971980,
                -.7456403858480766441810907 + .7113666249728352680992154j],
            4: [-.6572111716718829545787788 + .8301614350048733772399715j,
                -.9047587967882449459642624 + .2709187330038746636700926j],
            5: [-.9264420773877602247196260,
                -.8515536193688395541722677 + .4427174639443327209850002j,
                -.5905759446119191779319432 + .9072067564574549539291747j],
            6: [-.9093906830472271808050953 + .1856964396793046769246397j,
                -.7996541858328288520243325 + .5621717346937317988594118j,
                -.5385526816693109683073792 + .9616876881954277199245657j],
            7: [-.9194871556490290014311619,
                -.8800029341523374639772340 + .3216652762307739398381830j,
                -.7527355434093214462291616 + .6504696305522550699212995j,
                -.4966917256672316755024763 + 1.002508508454420401230220j],
            8: [-.9096831546652910216327629 + .1412437976671422927888150j,
                -.8473250802359334320103023 + .4259017538272934994996429j,
                -.7111381808485399250796172 + .7186517314108401705762571j,
                -.4621740412532122027072175 + 1.034388681126901058116589j],
            9: [-.9154957797499037686769223,
                -.8911217017079759323183848 + .2526580934582164192308115j,
                -.8148021112269012975514135 + .5085815689631499483745341j,
                -.6743622686854761980403401 + .7730546212691183706919682j,
                -.4331415561553618854685942 + 1.060073670135929666774323j],
            10: [-.9091347320900502436826431 + .1139583137335511169927714j,
                 -.8688459641284764527921864 + .3430008233766309973110589j,
                 -.7837694413101441082655890 + .5759147538499947070009852j,
                 -.6417513866988316136190854 + .8175836167191017226233947j,
                 -.4083220732868861566219785 + 1.081274842819124562037210j],
            11: [-.9129067244518981934637318,
                 -.8963656705721166099815744 + .2080480375071031919692341j,
                 -.8453044014712962954184557 + .4178696917801248292797448j,
                 -.7546938934722303128102142 + .6319150050721846494520941j,
                 -.6126871554915194054182909 + .8547813893314764631518509j,
                 -.3868149510055090879155425 + 1.099117466763120928733632j],
            12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j,
                 -.8802534342016826507901575 + .2871779503524226723615457j,
                 -.8217296939939077285792834 + .4810212115100676440620548j,
                 -.7276681615395159454547013 + .6792961178764694160048987j,
                 -.5866369321861477207528215 + .8863772751320727026622149j,
                 -.3679640085526312839425808 + 1.114373575641546257595657j],
            13: [-.9110914665984182781070663,
                 -.8991314665475196220910718 + .1768342956161043620980863j,
                 -.8625094198260548711573628 + .3547413731172988997754038j,
                 -.7987460692470972510394686 + .5350752120696801938272504j,
                 -.7026234675721275653944062 + .7199611890171304131266374j,
                 -.5631559842430199266325818 + .9135900338325109684927731j,
                 -.3512792323389821669401925 + 1.127591548317705678613239j],
            14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j,
                 -.8869506674916445312089167 + .2470079178765333183201435j,
                 -.8441199160909851197897667 + .4131653825102692595237260j,
                 -.7766591387063623897344648 + .5819170677377608590492434j,
                 -.6794256425119233117869491 + .7552857305042033418417492j,
                 -.5418766775112297376541293 + .9373043683516919569183099j,
                 -.3363868224902037330610040 + 1.139172297839859991370924j],
            15: [-.9097482363849064167228581,
                 -.9006981694176978324932918 + .1537681197278439351298882j,
                 -.8731264620834984978337843 + .3082352470564267657715883j,
                 -.8256631452587146506294553 + .4642348752734325631275134j,
                 -.7556027168970728127850416 + .6229396358758267198938604j,
                 -.6579196593110998676999362 + .7862895503722515897065645j,
                 -.5224954069658330616875186 + .9581787261092526478889345j,
                 -.3229963059766444287113517 + 1.149416154583629539665297j],
            16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j,
                 -.8911723070323647674780132 + .2167089659900576449410059j,
                 -.8584264231521330481755780 + .3621697271802065647661080j,
                 -.8074790293236003885306146 + .5092933751171800179676218j,
                 -.7356166304713115980927279 + .6591950877860393745845254j,
                 -.6379502514039066715773828 + .8137453537108761895522580j,
                 -.5047606444424766743309967 + .9767137477799090692947061j,
                 -.3108782755645387813283867 + 1.158552841199330479412225j],
            17: [-.9087141161336397432860029,
                 -.9016273850787285964692844 + .1360267995173024591237303j,
                 -.8801100704438627158492165 + .2725347156478803885651973j,
                 -.8433414495836129204455491 + .4100759282910021624185986j,
                 -.7897644147799708220288138 + .5493724405281088674296232j,
                 -.7166893842372349049842743 + .6914936286393609433305754j,
                 -.6193710717342144521602448 + .8382497252826992979368621j,
                 -.4884629337672704194973683 + .9932971956316781632345466j,
                 -.2998489459990082015466971 + 1.166761272925668786676672j],
            18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j,
                 -.8939764278132455733032155 + .1930374640894758606940586j,
                 -.8681095503628830078317207 + .3224204925163257604931634j,
                 -.8281885016242836608829018 + .4529385697815916950149364j,
                 -.7726285030739558780127746 + .5852778162086640620016316j,
                 -.6987821445005273020051878 + .7204696509726630531663123j,
                 -.6020482668090644386627299 + .8602708961893664447167418j,
                 -.4734268069916151511140032 + 1.008234300314801077034158j,
                 -.2897592029880489845789953 + 1.174183010600059128532230j],
            19: [-.9078934217899404528985092,
                 -.9021937639390660668922536 + .1219568381872026517578164j,
                 -.8849290585034385274001112 + .2442590757549818229026280j,
                 -.8555768765618421591093993 + .3672925896399872304734923j,
                 -.8131725551578197705476160 + .4915365035562459055630005j,
                 -.7561260971541629355231897 + .6176483917970178919174173j,
                 -.6818424412912442033411634 + .7466272357947761283262338j,
                 -.5858613321217832644813602 + .8801817131014566284786759j,
                 -.4595043449730988600785456 + 1.021768776912671221830298j,
                 -.2804866851439370027628724 + 1.180931628453291873626003j],
            20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j,
                 -.8959150941925768608568248 + .1740317175918705058595844j,
                 -.8749560316673332850673214 + .2905559296567908031706902j,
                 -.8427907479956670633544106 + .4078917326291934082132821j,
                 -.7984251191290606875799876 + .5264942388817132427317659j,
                 -.7402780309646768991232610 + .6469975237605228320268752j,
                 -.6658120544829934193890626 + .7703721701100763015154510j,
                 -.5707026806915714094398061 + .8982829066468255593407161j,
                 -.4465700698205149555701841 + 1.034097702560842962315411j,
                 -.2719299580251652601727704 + 1.187099379810885886139638j],
            21: [-.9072262653142957028884077,
                 -.9025428073192696303995083 + .1105252572789856480992275j,
                 -.8883808106664449854431605 + .2213069215084350419975358j,
                 -.8643915813643204553970169 + .3326258512522187083009453j,
                 -.8299435470674444100273463 + .4448177739407956609694059j,
                 -.7840287980408341576100581 + .5583186348022854707564856j,
                 -.7250839687106612822281339 + .6737426063024382240549898j,
                 -.6506315378609463397807996 + .7920349342629491368548074j,
                 -.5564766488918562465935297 + .9148198405846724121600860j,
                 -.4345168906815271799687308 + 1.045382255856986531461592j,
                 -.2640041595834031147954813 + 1.192762031948052470183960j],
            22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j,
                 -.8972983138153530955952835 + .1584351912289865608659759j,
                 -.8799661455640176154025352 + .2644363039201535049656450j,
                 -.8534754036851687233084587 + .3710389319482319823405321j,
                 -.8171682088462720394344996 + .4785619492202780899653575j,
                 -.7700332930556816872932937 + .5874255426351153211965601j,
                 -.7105305456418785989070935 + .6982266265924524000098548j,
                 -.6362427683267827226840153 + .8118875040246347267248508j,
                 -.5430983056306302779658129 + .9299947824439872998916657j,
                 -.4232528745642628461715044 + 1.055755605227545931204656j,
                 -.2566376987939318038016012 + 1.197982433555213008346532j],
            23: [-.9066732476324988168207439,
                 -.9027564979912504609412993 + .1010534335314045013252480j,
                 -.8909283242471251458653994 + .2023024699381223418195228j,
                 -.8709469395587416239596874 + .3039581993950041588888925j,
                 -.8423805948021127057054288 + .4062657948237602726779246j,
                 -.8045561642053176205623187 + .5095305912227258268309528j,
                 -.7564660146829880581478138 + .6141594859476032127216463j,
                 -.6965966033912705387505040 + .7207341374753046970247055j,
                 -.6225903228771341778273152 + .8301558302812980678845563j,
                 -.5304922463810191698502226 + .9439760364018300083750242j,
                 -.4126986617510148836149955 + 1.065328794475513585531053j,
                 -.2497697202208956030229911 + 1.202813187870697831365338j],
            24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j,
                 -.8983105104397872954053307 + .1454056133873610120105857j,
                 -.8837358034555706623131950 + .2426335234401383076544239j,
                 -.8615278304016353651120610 + .3403202112618624773397257j,
                 -.8312326466813240652679563 + .4386985933597305434577492j,
                 -.7921695462343492518845446 + .5380628490968016700338001j,
                 -.7433392285088529449175873 + .6388084216222567930378296j,
                 -.6832565803536521302816011 + .7415032695091650806797753j,
                 -.6096221567378335562589532 + .8470292433077202380020454j,
                 -.5185914574820317343536707 + .9569048385259054576937721j,
                 -.4027853855197518014786978 + 1.074195196518674765143729j,
                 -.2433481337524869675825448 + 1.207298683731972524975429j],
            25: [-.9062073871811708652496104,
                 -.9028833390228020537142561 + 93077131185102967450643820e-27j,
                 -.8928551459883548836774529 + .1863068969804300712287138j,
                 -.8759497989677857803656239 + .2798521321771408719327250j,
                 -.8518616886554019782346493 + .3738977875907595009446142j,
                 -.8201226043936880253962552 + .4686668574656966589020580j,
                 -.7800496278186497225905443 + .5644441210349710332887354j,
                 -.7306549271849967721596735 + .6616149647357748681460822j,
                 -.6704827128029559528610523 + .7607348858167839877987008j,
                 -.5972898661335557242320528 + .8626676330388028512598538j,
                 -.5073362861078468845461362 + .9689006305344868494672405j,
                 -.3934529878191079606023847 + 1.082433927173831581956863j,
                 -.2373280669322028974199184 + 1.211476658382565356579418j],
        }
        for N in originals:
            p1 = xp.asarray(
                np.union1d(originals[N], np.conj(originals[N])),
                dtype=xp.complex128
            )
            p2 = besselap(N, xp=xp)[1]
            xp_assert_close(_sort_cmplx(p2, xp=xp),
                            _sort_cmplx(p1, xp=xp), rtol=1e-14)
    def test_norm_phase(self, xp):
        # Test some orders and frequencies and see that they have the right
        # phase at w0
        if is_torch(xp) and DEFAULT_F32:
            pytest.xfail(reason="inaccurate on torch with float32")
        for N in (1, 2, 3, 4, 5, 51, 72):
            for w0 in (1, 100):
                b, a = bessel(N, xp.asarray(w0), analog=True, norm='phase')
                assert array_namespace(b) == array_namespace(a) == xp
                w = np.linspace(0, w0, 100)
                w, h = freqs(_xp_copy_to_numpy(b), _xp_copy_to_numpy(a), w)
                phase = np.unwrap(np.angle(h))
                # Phase should be 0 at DC and -N*pi/4 at w0.
                xp_assert_close(
                    xp.asarray(phase[[0, -1]], dtype=xp.float64),
                    xp.asarray([0, -N*xp.pi/4], dtype=xp.float64), rtol=1e-1
                )
    def test_norm_mag(self, xp):
        # Test some orders and frequencies and see that they have the right
        # mag at w0
        if DEFAULT_F32 and is_torch(xp):
            pytest.skip(reason="overflow occurs with float32 on torch")
        for N in (1, 2, 3, 4, 5, 51, 72):
            for w0 in (1, 100):
                b, a = bessel(N, xp.asarray(w0), analog=True, norm='mag')
                assert array_namespace(b) == array_namespace(a) == xp
                w = [0.0, w0]
                w, h = freqs(_xp_copy_to_numpy(b), _xp_copy_to_numpy(a), w)
                mag = np.abs(h)
                # Magnitude should be 1 at DC and -3 dB (1/sqrt(2)) at w0.
                xp_assert_close(
                    xp.asarray(mag), xp.asarray([1, 1/math.sqrt(2)], dtype=xp.float64)
                )
    def test_norm_delay(self, xp):
        # Test some orders and frequencies and see that they have the right
        # delay at DC
        if DEFAULT_F32 and is_torch(xp):
            pytest.skip(reason="overflow occurs with float32 on torch")
        for N in (1, 2, 3, 4, 5, 51, 72):
            for w0 in (1, 100):
                b, a = bessel(N, xp.asarray(w0), analog=True, norm='delay')
                w = np.linspace(0, 10*w0, 1000)
                w, h = freqs(_xp_copy_to_numpy(b), _xp_copy_to_numpy(a), w)
                unwr_h = np.unwrap(np.angle(h))
                # Group delay is the negative derivative of the phase.
                delay = -np.diff(unwr_h) / np.diff(w)
                assert math.isclose(delay[0], 1/w0, rel_tol=1e-4)
    def test_norm_factor(self):
        # Reference values for the delay->mag normalization factor,
        # precomputed with arbitrary-precision (mpmath) arithmetic.
        mpmath_values = {
            1: 1.0, 2: 1.361654128716130520, 3: 1.755672368681210649,
            4: 2.113917674904215843, 5: 2.427410702152628137,
            6: 2.703395061202921876, 7: 2.951722147038722771,
            8: 3.179617237510651330, 9: 3.391693138911660101,
            10: 3.590980594569163482, 11: 3.779607416439620092,
            12: 3.959150821144285315, 13: 4.130825499383535980,
            14: 4.295593409533637564, 15: 4.454233021624377494,
            16: 4.607385465472647917, 17: 4.755586548961147727,
            18: 4.899289677284488007, 19: 5.038882681488207605,
            20: 5.174700441742707423, 21: 5.307034531360917274,
            22: 5.436140703250035999, 23: 5.562244783787878196,
            24: 5.685547371295963521, 25: 5.806227623775418541,
            50: 8.268963160013226298, 51: 8.352374541546012058,
        }
        for N in mpmath_values:
            z, p, k = besselap(N, 'delay')
            xp_assert_close(_norm_factor(p, k), mpmath_values[N], rtol=1e-13)
    def test_bessel_poly(self):
        xp_assert_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1])
        xp_assert_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105])
    def test_bessel_zeros(self):
        xp_assert_equal(_bessel_zeros(0), [])
    def test_invalid(self):
        # Invalid norm names and non-positive/non-integer orders must raise.
        assert_raises(ValueError, besselap, 5, 'nonsense')
        assert_raises(ValueError, besselap, -5)
        assert_raises(ValueError, besselap, 3.2)
        assert_raises(ValueError, _bessel_poly, -3)
        assert_raises(ValueError, _bessel_poly, 3.3)
    @pytest.mark.fail_slow(10)
    def test_fs_param(self):
        # Designing with fs= must match designing with pre-normalized
        # critical frequencies fc/(fs/2).
        for norm in ('phase', 'mag', 'delay'):
            for fs in (900, 900.1, 1234.567):
                for N in (0, 1, 2, 3, 10):
                    for fc in (100, 100.1, 432.12345):
                        for btype in ('lp', 'hp'):
                            ba1 = bessel(N, fc, btype, norm=norm, fs=fs)
                            ba2 = bessel(N, fc/(fs/2), btype, norm=norm)
                            for ba1_, ba2_ in zip(ba1, ba2):
                                xp_assert_close(ba1_, ba2_)
                    for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
                        for btype in ('bp', 'bs'):
                            ba1 = bessel(N, fc, btype, norm=norm, fs=fs)
                            for seq in (list, tuple, array):
                                fcnorm = seq([f/(fs/2) for f in fc])
                                ba2 = bessel(N, fcnorm, btype, norm=norm)
                                for ba1_, ba2_ in zip(ba1, ba2):
                                    xp_assert_close(ba1_, ba2_)
@skip_xp_backends("dask.array", reason="https://github.com/dask/dask/issues/11883")
@make_xp_test_case(butter)
| TestBessel |
python | skorch-dev__skorch | skorch/exceptions.py | {
"start": 541,
"end": 639
class ____(SkorchWarning):
    """Warning category emitted when a problem with a compute device
    (e.g. CUDA) is detected."""
| DeviceWarning |
python | huggingface__transformers | tests/models/chameleon/test_modeling_chameleon.py | {
"start": 7358,
"end": 8332
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for Chameleon, wired into the shared tester mixins."""
    # Model classes exercised by the common mixin tests; empty when torch is
    # unavailable so test collection does not fail.
    all_model_classes = (ChameleonModel, ChameleonForConditionalGeneration) if is_torch_available() else ()
    # Maps pipeline task names to the model class used by the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": ChameleonModel,
            "text-generation": ChameleonForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    def setUp(self):
        # Shared helpers: the model tester builds configs/inputs; the config
        # tester runs the generic ChameleonConfig checks.
        self.model_tester = ChameleonModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ChameleonConfig, hidden_size=37)
    def test_config(self):
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()
    def test_model(self):
        """Build a model from prepared config/inputs and check its outputs."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Chameleon forces some token ids to be -inf!")
    def test_batching_equivalence(self):
        pass
| ChameleonModelTest |
python | psf__black | tests/data/cases/no_blank_line_before_docstring.py | {
"start": 900,
"end": 987
class ____:
    # NOTE(review): the single-quoted (non-triple) docstring below is
    # intentional fixture input for the formatter test — do not normalize it.
    "I'm a docstring but I don't even get triple quotes."
| SingleQuotedDocstring |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 10161,
"end": 11319
class ____(HypothesisException):
    """
    Signals that an alternative backend (a |PrimitiveProvider|) cannot
    continue. Expected to be raised inside one of the ``.draw_*()`` methods,
    or — for symbolic execution — perhaps in |PrimitiveProvider.realize|.

    The optional scope argument enables smarter integration:

    verified:
        Do not request further test cases from this backend. We *may*
        generate more test cases with other backends; if one fails then
        Hypothesis will report unsound verification in the backend too.

    exhausted:
        Do not request further test cases from this backend; finish testing
        with test cases generated with the default backend. Common if e.g.
        native code blocks symbolic reasoning very early.

    discard_test_case:
        This particular test case could not be converted to concrete values;
        skip any further processing and continue with another test case from
        this backend.
    """

    def __init__(self, failure_scope: CannotProceedScopeT = "other", /) -> None:
        # The parameter is positional-only, so renaming it is caller-safe;
        # the attribute keeps its public name.
        self.scope = failure_scope
| BackendCannotProceed |
python | celery__celery | t/smoke/tests/test_signals.py | {
"start": 1036,
"end": 1444
class ____:
    """Smoke test: publishing any task fires the before_task_publish signal."""

    def test_sanity(self, celery_setup: CeleryTestSetup):
        published = False

        # Handler flips the flag when any task publish is observed.
        @before_task_publish.connect
        def record_publish(*args, **kwargs):
            nonlocal published
            published = True

        noop.s().apply_async(queue=celery_setup.worker.worker_queue)
        assert published is True
| test_before_task_publish |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 7498,
"end": 7808
class ____(MPTTModel):
    # Self-referential FK forming the tree; children are reachable via the
    # ``children`` related name.
    parent = TreeForeignKey(
        "self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
    )
    # Creation timestamp, set automatically; used below for sibling ordering.
    now = models.DateTimeField(auto_now_add=True)
    class MPTTMeta:
        # Insert new nodes among their siblings ordered by creation time.
        order_insertion_by = ("now",)
| AutoNowDateFieldModel |
python | facebook__pyre-check | tools/upgrade/filesystem.py | {
"start": 9427,
"end": 11794
class ____(Filesystem):
    """Filesystem implementation that delegates listing to ``hg files``."""

    @override
    def list(
        self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
    ) -> List[str]:
        """List tracked files under ``root`` matching ``patterns``,
        minus any matching ``exclude`` patterns."""
        command = ["hg", "files"]
        for included in patterns:
            command.extend(("--include", included))
        for excluded in exclude or []:
            command.extend(("--exclude", excluded))
        result = subprocess.run(
            command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, cwd=root
        )
        return result.stdout.decode("utf-8").split()
@functools.lru_cache(1)
def get_filesystem() -> Filesystem:
try:
subprocess.check_output(["hg", "status"], stderr=subprocess.DEVNULL)
return MercurialBackedFilesystem()
except (subprocess.CalledProcessError, FileNotFoundError):
return Filesystem()
def remove_non_pyre_ignores(subdirectory: Path) -> None:
python_files = [
subdirectory / path
for path in get_filesystem().list(str(subdirectory), patterns=[r"**/*.py"])
]
if python_files:
LOG.info("...cleaning %s python files", len(python_files))
remove_type_ignore_command = [
"sed",
"-i",
r"s/\s*# \?type: \?ignore$//g",
] + [str(file) for file in python_files if file.exists()]
subprocess.check_output(remove_type_ignore_command)
def find_files(
directory: Path, name: str, grep_pattern: Optional[str] = None
) -> List[str]:
grep_arguments = (
["-exec", "grep", "--files-with-matches", grep_pattern, "{}", "+"]
if grep_pattern is not None
else []
)
try:
output = (
subprocess.check_output(
["find", str(directory), "-name", name, *grep_arguments]
)
.decode("utf-8")
.strip()
)
except subprocess.CalledProcessError as error:
LOG.warning(
"Failed to find files with name `%s` in directory `%s`:\n%s",
name,
directory,
error.stderr,
)
return []
if output == "":
return []
files = output.split("\n")
return [file.strip() for file in files]
def find_directories(directory: Path) -> List[Path]:
return [path for path in directory.iterdir() if path.is_dir()]
| MercurialBackedFilesystem |
python | numba__numba | numba/cuda/errors.py | {
"start": 61,
"end": 416
} | class ____(RuntimeError):
def __init__(self, msg, tid=None, ctaid=None):
self.tid = tid
self.ctaid = ctaid
self.msg = msg
t = ("An exception was raised in thread=%s block=%s\n"
"\t%s")
msg = t % (self.tid, self.ctaid, self.msg)
super(KernelRuntimeError, self).__init__(msg)
| KernelRuntimeError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.