language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 337010,
"end": 337359
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("EnterpriseServerUserAccount", graphql_name="node")
| EnterpriseServerUserAccountEdge |
python | faif__python-patterns | patterns/other/blackboard.py | {
"start": 444,
"end": 929
} | class ____(ABC):
"""Abstract class for experts in the blackboard system."""
@abstractmethod
def __init__(self, blackboard) -> None:
self.blackboard = blackboard
@property
@abstractmethod
def is_eager_to_contribute(self) -> int:
raise NotImplementedError("Must provide implementation in subclass.")
@abstractmethod
def contribute(self) -> None:
raise NotImplementedError("Must provide implementation in subclass.")
| AbstractExpert |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_project_codeowners_details.py | {
"start": 492,
"end": 8876
} | class ____(APITestCase):
def setUp(self) -> None:
self.user = self.create_user("admin@sentry.io", is_superuser=True)
self.login_as(user=self.user)
self.team = self.create_team(
organization=self.organization, slug="tiger-team", members=[self.user]
)
self.project = self.project = self.create_project(
organization=self.organization, teams=[self.team], slug="bengal"
)
self.code_mapping = self.create_code_mapping(project=self.project)
self.external_user = self.create_external_user(
external_name="@NisanthanNanthakumar", integration=self.integration
)
self.external_team = self.create_external_team(integration=self.integration)
self.data = {
"raw": "docs/* @NisanthanNanthakumar @getsentry/ecosystem\n",
}
self.codeowners = self.create_codeowners(
project=self.project, code_mapping=self.code_mapping
)
self.url = reverse(
"sentry-api-0-project-codeowners-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"codeowners_id": self.codeowners.id,
},
)
# Mock the external HTTP request to prevent real network calls
self.codeowner_patcher = patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={
"html_url": "https://github.com/test/CODEOWNERS",
"filepath": "CODEOWNERS",
"raw": "test content",
},
)
self.codeowner_mock = self.codeowner_patcher.start()
self.addCleanup(self.codeowner_patcher.stop)
def test_basic_delete(self) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.delete(self.url)
assert response.status_code == 204
assert not ProjectCodeOwners.objects.filter(id=str(self.codeowners.id)).exists()
@freeze_time("2023-10-03 00:00:00")
def test_basic_update(self) -> None:
self.create_external_team(external_name="@getsentry/frontend", integration=self.integration)
self.create_external_team(external_name="@getsentry/docs", integration=self.integration)
raw = "\n# cool stuff comment\n*.js @getsentry/frontend @NisanthanNanthakumar\n# good comment\n\n\n docs/* @getsentry/docs @getsentry/ecosystem\n\n"
data = {
"raw": raw,
}
# Reset call count to verify this specific test's calls
self.codeowner_mock.reset_mock()
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, data)
# Verify our mock was called instead of making real HTTP requests
assert (
self.codeowner_mock.called
), "Mock should have been called - no external HTTP requests made"
assert response.status_code == 200
assert response.data["id"] == str(self.codeowners.id)
assert response.data["raw"] == raw.strip()
codeowner = ProjectCodeOwners.objects.filter(id=self.codeowners.id)[0]
assert codeowner.date_updated.strftime("%Y-%m-%d %H:%M:%S") == "2023-10-03 00:00:00"
def test_wrong_codeowners_id(self) -> None:
self.url = reverse(
"sentry-api-0-project-codeowners-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"codeowners_id": 1000,
},
)
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, self.data)
assert response.status_code == 404
assert response.data == {"detail": "The requested resource does not exist"}
def test_missing_external_associations_update(self) -> None:
data = {
"raw": "\n# cool stuff comment\n*.js @getsentry/frontend @NisanthanNanthakumar\n# good comment\n\n\n docs/* @getsentry/docs @getsentry/ecosystem\nsrc/sentry/* @AnotherUser\n\n"
}
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, data)
assert response.status_code == 200
assert response.data["id"] == str(self.codeowners.id)
assert response.data["codeMappingId"] == str(self.code_mapping.id)
errors = response.data["errors"]
assert set(errors["missing_external_teams"]) == {"@getsentry/frontend", "@getsentry/docs"}
assert set(errors["missing_external_users"]) == {"@AnotherUser"}
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
def test_invalid_code_mapping_id_update(self) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, {"codeMappingId": 500})
assert response.status_code == 400
assert response.data == {"codeMappingId": ["This code mapping does not exist."]}
def test_no_duplicates_code_mappings(self) -> None:
new_code_mapping = self.create_code_mapping(project=self.project, stack_root="blah")
self.create_codeowners(project=self.project, code_mapping=new_code_mapping)
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, {"codeMappingId": new_code_mapping.id})
assert response.status_code == 400
assert response.data == {"codeMappingId": ["This code mapping is already in use."]}
def test_codeowners_email_update(self) -> None:
data = {"raw": f"\n# cool stuff comment\n*.js {self.user.email}\n# good comment\n\n\n"}
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, data)
assert response.status_code == 200
assert response.data["raw"] == "# cool stuff comment\n*.js admin@sentry.io\n# good comment"
@patch("sentry.analytics.record")
def test_codeowners_max_raw_length(self, mock_record: MagicMock) -> None:
with mock.patch(
"sentry.issues.endpoints.serializers.MAX_RAW_LENGTH", len(self.data["raw"]) + 1
):
data = {
"raw": f"# cool stuff comment\n*.js {self.user.email}\n# good comment"
}
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(self.url, data)
assert response.status_code == 400
assert response.data == {
"raw": [
ErrorDetail(
string=f"Raw needs to be <= {len(self.data['raw']) + 1} characters in length",
code="invalid",
)
]
}
assert_last_analytics_event(
mock_record,
CodeOwnersMaxLengthExceeded(
organization_id=self.organization.id,
),
)
# Test that we allow this to be modified for existing large rows
code_mapping = self.create_code_mapping(project=self.project, stack_root="/")
codeowners = self.create_codeowners(
project=self.project,
code_mapping=code_mapping,
raw=f"*.py test@localhost #{self.team.slug}",
)
url = reverse(
"sentry-api-0-project-codeowners-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"codeowners_id": codeowners.id,
},
)
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.put(url, data)
assert ProjectCodeOwners.objects.get(id=codeowners.id).raw == data.get("raw")
| ProjectCodeOwnersDetailsEndpointTestCase |
python | OmkarPathak__pygorithm | pygorithm/data_structures/graph.py | {
"start": 10919,
"end": 13072
} | class ____(object):
"""CheckCycleUndirectedGraph
Class to check cycle in undirected graph
"""
def __init__(self):
self.graph = {}
self.count = 0
def print_graph(self):
"""
for printing the contents of the graph
"""
for i in self.graph:
print(i, '->', ' -> '.join([str(j) for j in self.graph[i]]))
def add_edge(self, from_vertex, to_vertex):
"""
for adding the edge between two vertices
"""
# check if vertex is already present,
if from_vertex in self.graph.keys():
self.graph[from_vertex].append(to_vertex)
else:
# otherwise add it to the graph
self.graph[from_vertex] = [to_vertex]
if to_vertex in self.graph.keys():
self.graph[to_vertex].append(from_vertex)
else:
self.graph[to_vertex] = [from_vertex]
def check_cycle(self):
"""
This function will return True if graph is cyclic else return False
"""
# Marking all vertices as not visited
visited = [False] * len(self.graph)
for vertex in range(len(self.graph)):
# Call the recursive function only if not visited
if not visited[vertex]:
if self.__check_cycle_rec(visited, -1, vertex):
return True
return False
def __check_cycle_rec(self, visited, parent, vertex):
"""
Recursive function for finding the cycle
"""
# Mark the current node in visited
visited[vertex] = True
# mark all adjacent nodes of the current node
for adjacentNode in self.graph[vertex]:
if not visited[adjacentNode]:
if self.__check_cycle_rec(visited, vertex, adjacentNode):
return True
elif parent != adjacentNode:
return True
return False
@staticmethod
def get_code():
"""
returns the code for the current class
"""
return inspect.getsource(CheckCycleUndirectedGraph)
| CheckCycleUndirectedGraph |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_artifacts.py | {
"start": 6745,
"end": 17943
} | class ____:
async def test_read_artifacts(self, artifacts, client):
response = await client.post("/artifacts/filter")
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == len(artifacts)
assert {r["key"] for r in response.json()} == {a["key"] for a in artifacts}
assert {r["data"] for r in response.json()} == {a["data"] for a in artifacts}
assert {r["description"] for r in response.json()} == {
a["description"] for a in artifacts
}
assert {r["flow_run_id"] for r in response.json()} == {
a["flow_run_id"] for a in artifacts
}
async def test_read_artifacts_with_artifact_key_filter_any(self, artifacts, client):
artifact_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
key=schemas.filters.ArtifactFilterKey(
any_=[artifacts[0]["key"], artifacts[1]["key"]]
)
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 2
assert {r["key"] for r in response.json()} == {
artifacts[0]["key"],
artifacts[1]["key"],
}
async def test_read_artifact_with_artifact_key_filter_exists(
self, artifacts, client
):
artifact_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
key=schemas.filters.ArtifactFilterKey(exists_=True)
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == len(artifacts) - 1
assert all(r["key"] for r in response.json())
async def test_read_artifact_with_artifact_key_filter_not_exists(
self, artifacts, client
):
artifact_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
key=schemas.filters.ArtifactFilterKey(exists_=False)
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert response.json()[0]["key"] is None
async def test_read_artifacts_with_artifact_id_filter(self, artifacts, client):
artifact_id = artifacts[0]["id"]
artifact_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
id=schemas.filters.ArtifactFilterId(any_=[artifact_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
async def test_read_artifacts_with_artifact_flow_run_id_filter(
self, artifacts, client
):
flow_run_id = artifacts[0]["flow_run_id"]
flow_run_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
flow_run_id=schemas.filters.ArtifactFilterFlowRunId(any_=[flow_run_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 2
assert all(
[item["flow_run_id"] == str(flow_run_id) for item in response.json()]
)
async def test_read_artifacts_with_artifact_task_run_id_filter(
self, artifacts, client
):
task_run_id = artifacts[0]["task_run_id"]
task_run_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
task_run_id=schemas.filters.ArtifactFilterTaskRunId(any_=[task_run_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=task_run_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert all(
[item["task_run_id"] == str(task_run_id) for item in response.json()]
)
async def test_read_artifacts_with_artifact_type_filter_any(
self, artifacts, client
):
artifact_type = artifacts[1]["type"]
artifact_type_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
type=schemas.filters.ArtifactFilterType(any_=[artifact_type])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_type_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert response.json()[0]["type"] == artifact_type
async def test_read_artifacts_with_artifact_type_filter_not_any(
self, artifacts, client
):
artifact_type = artifacts[2]["type"]
artifact_type_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
type=schemas.filters.ArtifactFilterType(not_any_=[artifact_type])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_type_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 2
assert all([item["type"] != artifact_type for item in response.json()])
async def test_read_artifacts_with_multiple_filters(
self, artifacts, flow_run, task_run, client
):
multiple_filters = dict(
artifacts=schemas.filters.ArtifactFilter(
flow_run_id=schemas.filters.ArtifactFilterFlowRunId(any_=[flow_run.id]),
task_run_id=schemas.filters.ArtifactFilterTaskRunId(any_=[task_run.id]),
).model_dump(mode="json"),
)
response = await client.post("/artifacts/filter", json=multiple_filters)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert all(
[item["flow_run_id"] == str(flow_run.id) for item in response.json()]
)
assert all(
[item["task_run_id"] == str(task_run.id) for item in response.json()]
)
async def test_read_artifacts_with_flow_run_filter(self, artifacts, client):
flow_run_id = artifacts[0]["flow_run_id"]
flow_run_filter = dict(
flow_runs=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 2
assert all(
[item["flow_run_id"] == str(flow_run_id) for item in response.json()]
)
async def test_read_artifacts_with_task_run_filter(self, artifacts, client):
task_run_id = artifacts[0]["task_run_id"]
task_run_filter = dict(
task_runs=schemas.filters.TaskRunFilter(
id=schemas.filters.TaskRunFilterId(any_=[task_run_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=task_run_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert all(
[item["task_run_id"] == str(task_run_id) for item in response.json()]
)
async def test_read_artifacts_with_limit(self, artifacts, client):
response = await client.post("/artifacts/filter", json={"limit": 1})
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
async def test_read_artifacts_with_offset(self, artifacts, client):
response = await client.post(
"/artifacts/filter",
json={
"offset": 1,
"sort": schemas.sorting.ArtifactSort.CREATED_DESC,
},
)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == len(artifacts) - 1
actual_keys = [item["key"] for item in response.json()]
expected_keys = [item["key"] for item in artifacts[:-1]]
assert set(actual_keys) == set(expected_keys)
async def test_read_artifacts_with_sort(self, artifacts, client):
response = await client.post(
"/artifacts/filter",
json=dict(sort=schemas.sorting.ArtifactSort.UPDATED_DESC),
)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == len(artifacts)
# assert they are sorted correctly
assert all(
[
response.json()[i]["updated"] >= response.json()[i + 1]["updated"]
for i in range(len(response.json()) - 1)
]
)
async def test_read_artifacts_returns_empty_list(self, client):
response = await client.post("/artifacts/filter")
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 0
async def test_read_artifacts_with_applies_key_like_filter(self, artifacts, client):
like_first_key = artifacts[0]["key"][-1]
artifact_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
key=schemas.filters.ArtifactFilterKey(like_=like_first_key)
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=artifact_filter)
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
assert response.json()[0]["key"] == artifacts[0]["key"]
async def test_reading_artifacts_by_flow_name(self, flow_artifacts, client):
flow_name = flow_artifacts[0]["name"]
flow_filter = dict(
flows=schemas.filters.FlowFilter(
name=schemas.filters.FlowFilterName(any_=[flow_name])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=flow_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert len(json) == 2
assert sorted([json[0]["id"], json[1]["id"]]) == sorted(
[flow_artifacts[1]["id"], flow_artifacts[2]["id"]]
)
async def test_reading_artifacts_by_deployment(self, flow_artifacts, client):
deployment_id = flow_artifacts[3]
deployment_filter = dict(
deployments=schemas.filters.DeploymentFilter(
id=schemas.filters.DeploymentFilterId(any_=[deployment_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/filter", json=deployment_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert len(json) == 2
assert sorted([json[0]["id"], json[1]["id"]]) == sorted(
[flow_artifacts[1]["id"], flow_artifacts[2]["id"]]
)
| TestReadArtifacts |
python | doocs__leetcode | solution/2900-2999/2932.Maximum Strong Pair XOR I/Solution.py | {
"start": 0,
"end": 157
} | class ____:
def maximumStrongPairXor(self, nums: List[int]) -> int:
return max(x ^ y for x in nums for y in nums if abs(x - y) <= min(x, y))
| Solution |
python | pytorch__pytorch | torch/fx/passes/utils/matcher_with_name_node_map_utils.py | {
"start": 1504,
"end": 4241
} | class ____(SubgraphMatcher):
"""Extends SubgraphMatcher to support querying the matched subgraph nodes through node name,
this requires pattern to have specific format (returning and additional dictionary at the output,
that has node name as key, and the node in the pattern graph as value, see Example for more details)
Difference with SubgraphMatcher is that it takes a `pattern_gm` GraphModule as input during
initialization since we need to modify the graph (which requires `recompile` the GraphModule)
Example::
def pattern(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
return relu, {"conv": conv, "relu": relu}
def target_graph(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu *= 2
return relu
pattern_gm = export_for_training(pattern, example_inputs).module()
target_gm = export_for_training(target_graph, example_inputs).module()
matcher = SubgraphMatcherWithNameNodeMap(pattern_gm)
matches = matcher.match(target_gm)
for match in matches:
match.name_node_map["conv"].meta["annotation"] = ...
"""
def __init__(
self,
pattern_gm: GraphModule,
match_output: bool = False,
match_placeholder: bool = False,
remove_overlapping_matches: bool = True,
ignore_literals: bool = False,
) -> None:
pattern_gm, name_node_map = _split_to_graph_and_name_node_map(pattern_gm)
self.name_node_map = name_node_map
super().__init__(
pattern_gm.graph,
match_output,
match_placeholder,
remove_overlapping_matches,
ignore_literals,
)
def match(self, graph: Graph, node_name_match: str = "") -> list[InternalMatch]:
"""The returned InternalMatch will have name_node_map populated with a map
from node name (str) to the target node, e.g.
{"conv": target_conv_ndoe, "relu": target_relu_node}
this requires the pattern graph returns an additional
output of node name to node, e.g. instead of:
```
def pattern(...):
...
return relu
```
we should do:
```
def pattern(...):
...
return relu, {"conv": conv, "relu": relu}
``` instead
"""
internal_matches = super().match(graph, node_name_match)
for internal_match in internal_matches:
for k, n in self.name_node_map.items():
internal_match.name_node_map[k] = internal_match.nodes_map[n]
return internal_matches
| SubgraphMatcherWithNameNodeMap |
python | getsentry__sentry | src/sentry/api/serializers/models/team.py | {
"start": 12197,
"end": 12363
} | class ____(OrganizationTeamSCIMSerializerRequired, total=False):
members: list[SCIMTeamMemberListItem]
@dataclasses.dataclass
| OrganizationTeamSCIMSerializerResponse |
python | jmcnamara__XlsxWriter | xlsxwriter/test/vml/test_write_anchor.py | {
"start": 289,
"end": 786
} | class ____(unittest.TestCase):
"""
Test the Vml _write_anchor() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_anchor(self):
"""Test the _write_anchor() method"""
self.vml._write_anchor([2, 0, 15, 10, 4, 4, 15, 4])
exp = """<x:Anchor>2, 15, 0, 10, 4, 15, 4, 4</x:Anchor>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteXAnchor |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/generative_model.py | {
"start": 4590,
"end": 8474
} | class ____(GoogleCloudBaseOperator):
"""
Use the Vertex AI Gemini Pro foundation model to generate content.
:param project_id: Required. The ID of the Google Cloud project that the
service belongs to (templated).
:param location: Required. The ID of the Google Cloud location that the
service belongs to (templated).
:param contents: Required. The multi-part content of a message that a user or a program
gives to the generative model, in order to elicit a specific response.
:param generation_config: Optional. Generation configuration settings.
:param safety_settings: Optional. Per request settings for blocking unsafe content.
:param tools: Optional. A list of tools available to the model during evaluation, such as a data store.
:param system_instruction: Optional. An instruction given to the model to guide its behavior.
:param pretrained_model: Required. The name of the model to use for content generation,
which can be a text-only or multimodal model. For example, `gemini-pro` or
`gemini-pro-vision`.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("location", "project_id", "impersonation_chain", "contents", "pretrained_model")
def __init__(
self,
*,
project_id: str,
location: str,
contents: list,
tools: list | None = None,
generation_config: dict | None = None,
safety_settings: dict | None = None,
system_instruction: str | None = None,
pretrained_model: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.contents = contents
self.tools = tools
self.generation_config = generation_config
self.safety_settings = safety_settings
self.system_instruction = system_instruction
self.pretrained_model = pretrained_model
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
self.hook = GenerativeModelHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = self.hook.generative_model_generate_content(
project_id=self.project_id,
location=self.location,
contents=self.contents,
tools=self.tools,
generation_config=self.generation_config,
safety_settings=self.safety_settings,
system_instruction=self.system_instruction,
pretrained_model=self.pretrained_model,
)
self.log.info("Model response: %s", response)
context["ti"].xcom_push(key="model_response", value=response)
return response
@deprecated(
planned_removal_date="January 3, 2026",
use_instead="airflow.providers.google.cloud.operators.gen_ai.generative_model.GenAISupervisedFineTuningTrainOperator",
category=AirflowProviderDeprecationWarning,
)
| GenerativeModelGenerateContentOperator |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial001_py310.py | {
"start": 77,
"end": 295
} | class ____(SQLModel, table=True):
team_id: int | None = Field(default=None, foreign_key="team.id", primary_key=True)
hero_id: int | None = Field(default=None, foreign_key="hero.id", primary_key=True)
| HeroTeamLink |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 19343,
"end": 19404
} | class ____(EmptyLabelTestCase):
pass
| Jinja2EmptyLabelTestCase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/components.py | {
"start": 572,
"end": 1097
} | class ____(DpathExtractor):
"""
A custom record extractor is needed to handle cases when records are represented as list of strings insted of dictionaries.
Example:
-> ["label 1", "label 2", ..., "label n"]
<- [{"label": "label 1"}, {"label": "label 2"}, ..., {"label": "label n"}]
"""
def extract_records(self, response: Response) -> List[Mapping[str, Any]]:
records = super().extract_records(response)
return [{"label": record} for record in records]
| LabelsRecordExtractor |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 87392,
"end": 87440
} | class ____(Scan):
pass
@ir_dataclass
| SplitScan |
python | ansible__ansible | hacking/azp/incidental.py | {
"start": 13140,
"end": 16777
} | class ____:
def __init__(self, path, source, coverage_data, coverage_points):
self.path = path
self.lines = source.decode().splitlines()
self.coverage_data = coverage_data
self.coverage_points = coverage_points
self.github_url = coverage_data.github_base_url + path
is_arcs = ':' in dict(coverage_points).popitem()[0]
if is_arcs:
parse = parse_arc
else:
parse = int
self.covered_points = set(parse(v) for v in coverage_points)
self.covered_arcs = self.covered_points if is_arcs else None
self.covered_lines = set(abs(p[0]) for p in self.covered_points) | set(abs(p[1]) for p in self.covered_points)
def collect_sources(data_path, git, coverage_data, result_sha):
with open(data_path) as data_file:
data = json.load(data_file)
sources = []
for path_coverage in data.values():
for path, path_data in path_coverage.items():
sources.append(SourceFile(path, git.show(['%s:%s' % (result_sha, path)]), coverage_data, path_data))
return sources
def generate_report(sources, report_path, coverage_data, target_name, missing):
output = [
'Target: %s (%s coverage)' % (target_name, 'missing' if missing else 'exclusive'),
'GitHub: %stest/integration/targets/%s' % (coverage_data.github_base_url, target_name),
]
for source in sources:
if source.covered_arcs:
output.extend([
'',
'Source: %s (%d arcs, %d/%d lines):' % (source.path, len(source.covered_arcs), len(source.covered_lines), len(source.lines)),
'GitHub: %s' % source.github_url,
'',
])
else:
output.extend([
'',
'Source: %s (%d/%d lines):' % (source.path, len(source.covered_lines), len(source.lines)),
'GitHub: %s' % source.github_url,
'',
])
last_line_no = 0
for line_no, line in enumerate(source.lines, start=1):
if line_no not in source.covered_lines:
continue
if last_line_no and last_line_no != line_no - 1:
output.append('')
notes = ''
if source.covered_arcs:
from_lines = sorted(p[0] for p in source.covered_points if abs(p[1]) == line_no)
to_lines = sorted(p[1] for p in source.covered_points if abs(p[0]) == line_no)
if from_lines:
notes += ' ### %s -> (here)' % ', '.join(str(from_line) for from_line in from_lines)
if to_lines:
notes += ' ### (here) -> %s' % ', '.join(str(to_line) for to_line in to_lines)
output.append('%4d %s%s' % (line_no, line, notes))
last_line_no = line_no
with open(report_path, 'w') as report_file:
report_file.write('\n'.join(output) + '\n')
def parse_arc(value):
return tuple(int(v) for v in value.split(':'))
def cached(path, use_cache, show_messages, func):
if os.path.exists(path) and use_cache:
if show_messages:
sys.stderr.write('%s: cached\n' % path)
sys.stderr.flush()
return
if show_messages:
sys.stderr.write('%s: generating ... ' % path)
sys.stderr.flush()
func()
if show_messages:
sys.stderr.write('done\n')
sys.stderr.flush()
def check_failed(args, message):
if args.skip_checks:
sys.stderr.write('WARNING: %s\n' % message)
return
raise ApplicationError(message)
| SourceFile |
python | spack__spack | lib/spack/spack/solver/core.py | {
"start": 1714,
"end": 2896
} | class ____(AspObject):
"""A term in the ASP logic program"""
__slots__ = ["name", "args"]
def __init__(self, name: str, args: Optional[Tuple[Any, ...]] = None) -> None:
self.name = name
self.args = () if args is None else tuple(args)
def _cmp_key(self) -> Tuple[str, Optional[Tuple[Any, ...]]]:
return self.name, self.args
def __call__(self, *args: Any) -> "AspFunction":
"""Return a new instance of this function with added arguments.
Note that calls are additive, so you can do things like::
>>> attr = AspFunction("attr")
attr()
>>> attr("version")
attr("version")
>>> attr("version")("foo")
attr("version", "foo")
>>> v = AspFunction("attr", "version")
attr("version")
>>> v("foo", "bar")
attr("version", "foo", "bar")
"""
return AspFunction(self.name, self.args + args)
def __str__(self) -> str:
args = f"({','.join(str(_id(arg)) for arg in self.args)})"
return f"{self.name}{args}"
def __repr__(self) -> str:
return str(self)
| AspFunction |
python | mlflow__mlflow | mlflow/genai/judges/builtin_judges.py | {
"start": 189,
"end": 322
} | class ____(BuiltInScorer, Judge):
"""
Base class for built-in AI judge scorers that use LLMs for evaluation.
"""
| BuiltinJudge |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 23410,
"end": 24754
class TestVisit(TestCase):
    """
    Feature: The .visit and .visititems methods allow iterative access to
    group and subgroup members
    """

    def setUp(self):
        self.f = File(self.mktemp(), 'w')
        self.groups = [
            'grp1', 'grp1/sg1', 'grp1/sg2', 'grp2', 'grp2/sg1', 'grp2/sg1/ssg1'
        ]
        for x in self.groups:
            self.f.create_group(x)

    def tearDown(self):
        self.f.close()

    def test_visit(self):
        """ All subgroups are visited """
        l = []
        self.f.visit(l.append)
        self.assertSameElements(l, self.groups)

    def test_visititems(self):
        """ All subgroups and contents are visited """
        l = []
        comp = [(x, self.f[x]) for x in self.groups]
        self.f.visititems(lambda x, y: l.append((x, y)))
        self.assertSameElements(comp, l)

    def test_bailout(self):
        """ Returning a non-None value immediately aborts iteration """
        # do not make assumption on iteration order
        l = []
        x = self.f.visit(lambda x: l.append(x) or -1)
        assert x == -1 and len(l) == 1 and l[0] in self.groups
        l = []
        comp = [(x, self.f[x]) for x in self.groups]
        x = self.f.visititems(lambda x, y: l.append((x, y)) or -1)
        assert x == -1 and len(l) == 1 and l[0] in comp
| TestVisit |
python | euske__pdfminer | pdfminer/psparser.py | {
"start": 276,
"end": 367
class PSValueError(PSException):
    # NOTE(review): empty marker subclass of PSException; presumably raised
    # for malformed PostScript values — confirm against raise sites.
    pass
## Basic PostScript Types
##
## PSObject
##
| PSValueError |
python | fastai__fastai | fastai/layers.py | {
"start": 14378,
"end": 15499
class SimpleSelfAttention(Module):
    """Self-attention layer: out = gamma * (x @ x.T) @ conv(x) + x, computed over
    the flattened spatial positions. With `sym=True` the conv weight matrix is
    symmetrized on every forward pass."""

    def __init__(self, n_in: int, ks=1, sym=False):
        self.sym, self.n_in = sym, n_in
        self.conv = _conv1d_spect(n_in, n_in, ks, padding=ks // 2, bias=False)
        # gamma starts at 0 so the layer is initially an identity (residual only).
        self.gamma = nn.Parameter(tensor([0.]))

    def forward(self, x):
        if self.sym:
            # Enforce a symmetric weight matrix before applying the conv.
            c = self.conv.weight.view(self.n_in, self.n_in)
            c = (c + c.t()) / 2
            self.conv.weight = c.view(self.n_in, self.n_in, 1)
        size = x.size()
        x = x.view(*size[:2], -1)  # flatten spatial dims: (B, C, H*W)
        convx = self.conv(x)
        xxT = torch.bmm(x, x.permute(0, 2, 1).contiguous())
        o = torch.bmm(xxT, convx)
        o = self.gamma * o + x
        return o.view(*size).contiguous()
# %% ../nbs/01_layers.ipynb 101
def icnr_init(x, scale=2, init=nn.init.kaiming_normal_):
    "ICNR init of `x`, with `scale` and `init` function"
    ni, nf, h, w = x.shape
    # Initialize a sub-kernel for 1/scale^2 of the output channels, then tile
    # it so every pixel of a scale x scale upsampling block shares weights.
    ni2 = int(ni / (scale ** 2))
    k = init(x.new_zeros([ni2, nf, h, w])).transpose(0, 1)
    k = k.contiguous().view(ni2, nf, -1)
    k = k.repeat(1, 1, scale ** 2)
    return k.contiguous().view([nf, ni, h, w]).transpose(0, 1)
# %% ../nbs/01_layers.ipynb 104
| SimpleSelfAttention |
python | google__pytype | pytype/typegraph/cfg_utils.py | {
"start": 8754,
"end": 10569
class OrderableNode(PredecessorNode, Protocol):
    """A predecessor node that also carries a deterministic integer id."""
    id: int


_OrderableNode = TypeVar("_OrderableNode", bound=OrderableNode)
def order_nodes(nodes: Sequence[_OrderableNode]) -> list[_OrderableNode]:
    """Build an ancestors first traversal of CFG nodes.

    This guarantees that at least one predecessor of a block is scheduled before
    the block itself, and it also tries to schedule as many of them before the
    block as possible (so e.g. if two branches merge in a node, it prefers to
    process both the branches before that node).

    Args:
      nodes: A list of nodes or blocks. They have two attributes: "id" (an int to
        enable deterministic sorting) and "outgoing" (a list of nodes).

    Returns:
      A list of nodes in the proper order.
    """
    if not nodes:
        return []
    root = nodes[0]
    predecessor_map = compute_predecessors(nodes)
    # Nodes unreachable from the root are "dead" and never scheduled.
    dead = {
        node
        for node, predecessors in predecessor_map.items()
        if root not in predecessors
    }
    queue = {root: predecessor_map[root]}
    order = []
    seen = set()
    while queue:
        # Find node with minimum amount of predecessors that's connected to a node
        # we already processed.
        _, _, node = min(
            (len(predecessors), node.id, node)
            for node, predecessors in queue.items()
        )
        del queue[node]
        if node in seen:
            continue
        order.append(node)
        seen.add(node)
        # Remove this node from the predecessors of all nodes after it.
        for _, predecessors in queue.items():
            predecessors.discard(node)
        # Potentially schedule nodes we couldn't reach before:
        for n in node.outgoing:
            if n not in queue:
                queue[n] = predecessor_map[n] - seen
    # check that we don't have duplicates and that we didn't miss anything:
    assert len(set(order) | dead) == len(set(nodes))
    return order
| OrderableNode |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/llama_index/vector_stores/openGauss/base.py | {
"start": 350,
"end": 4755
class DBEmbeddingRow(NamedTuple):
    """One row returned from a vector-similarity query."""
    node_id: str
    text: str
    metadata: dict
    similarity: float


# Postgres column types accepted for indexed metadata keys.
PGType = Literal[
    "text",
    "int",
    "integer",
    "numeric",
    "float",
    "double precision",
    "boolean",
    "date",
    "timestamp",
    "uuid",
]
def get_data_model(
    base: Type,
    index_name: str,
    schema_name: str,
    hybrid_search: bool,
    text_search_config: str,
    cache_okay: bool,
    embed_dim: int = 1536,
    use_jsonb: bool = False,
    indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
) -> Any:
    """
    Create a dynamic sqlalchemy model bound to a new table.
    """
    from opengauss_sqlalchemy.usertype import Vector
    from sqlalchemy import Column, Computed
    from sqlalchemy.dialects.postgresql import (
        BIGINT,
        JSON,
        JSONB,
        TSVECTOR,
        VARCHAR,
        UUID,
        DOUBLE_PRECISION,
    )
    from sqlalchemy import cast, column
    from sqlalchemy import String, Integer, Numeric, Float, Boolean, Date, DateTime
    from sqlalchemy.schema import Index
    from sqlalchemy.types import TypeDecorator

    pg_type_map = {
        "text": String,
        "int": Integer,
        "integer": Integer,
        "numeric": Numeric,
        "float": Float,
        "double precision": DOUBLE_PRECISION,  # or Float(precision=53)
        "boolean": Boolean,
        "date": Date,
        "timestamp": DateTime,
        "uuid": UUID,
    }

    indexed_metadata_keys = indexed_metadata_keys or set()
    # check that types are in pg_type_map
    for key, pg_type in indexed_metadata_keys:
        if pg_type not in pg_type_map:
            raise ValueError(
                f"Invalid type {pg_type} for key {key}. "
                f"Must be one of {list(pg_type_map.keys())}"
            )

    class TSVector(TypeDecorator):
        impl = TSVECTOR
        cache_ok = cache_okay

    tablename = "data_og_%s" % index_name  # dynamic table name
    class_name = "Data_og_%s" % index_name  # dynamic class name
    indexname = "%s_og_idx" % index_name  # dynamic index name
    metadata_dtype = JSONB if use_jsonb else JSON

    embedding_col = Column(Vector(embed_dim))

    # btree indices over the requested metadata keys (cast from JSON text).
    metadata_indices = [
        Index(
            f"{indexname}_{key}_{pg_type.replace(' ', '_')}",
            cast(column("metadata_").op("->>")(key), pg_type_map[pg_type]),
            postgresql_using="btree",
        )
        for key, pg_type in indexed_metadata_keys
    ]

    if hybrid_search:

        class HybridAbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col
            text_search_tsv = Column(  # type: ignore
                TSVector(),
                Computed(
                    "to_tsvector('%s', text)" % text_search_config, persisted=True
                ),
            )

        model = type(
            class_name,
            (HybridAbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )

        Index(
            indexname,
            model.text_search_tsv,  # type: ignore
            postgresql_using="gin",
        )
        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )
    else:

        class AbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col

        model = type(
            class_name,
            (AbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )

        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )

    return model
| DBEmbeddingRow |
python | django-haystack__django-haystack | test_haystack/test_app_loading.py | {
"start": 152,
"end": 2074
class AppLoadingTests(TestCase):
    def test_load_apps(self):
        apps = app_loading.haystack_load_apps()
        self.assertIsInstance(apps, (list, GeneratorType))
        self.assertIn("hierarchal_app_django", apps)
        self.assertNotIn(
            "test_app_without_models",
            apps,
            msg="haystack_load_apps should exclude apps without defined models",
        )

    def test_get_app_modules(self):
        app_modules = app_loading.haystack_get_app_modules()
        self.assertIsInstance(app_modules, (list, GeneratorType))
        for i in app_modules:
            self.assertIsInstance(i, ModuleType)

    def test_get_models_all(self):
        models = app_loading.haystack_get_models("core")
        self.assertIsInstance(models, (list, GeneratorType))

    def test_get_models_specific(self):
        from test_haystack.core.models import MockModel

        models = app_loading.haystack_get_models("core.MockModel")
        self.assertIsInstance(models, (list, GeneratorType))
        self.assertListEqual(models, [MockModel])

    def test_hierarchal_app_get_models(self):
        models = app_loading.haystack_get_models("hierarchal_app_django")
        self.assertIsInstance(models, (list, GeneratorType))
        self.assertSetEqual(
            set(str(i._meta) for i in models),
            set(
                (
                    "hierarchal_app_django.hierarchalappsecondmodel",
                    "hierarchal_app_django.hierarchalappmodel",
                )
            ),
        )

    def test_hierarchal_app_specific_model(self):
        models = app_loading.haystack_get_models(
            "hierarchal_app_django.HierarchalAppModel"
        )
        self.assertIsInstance(models, (list, GeneratorType))
        self.assertSetEqual(
            set(str(i._meta) for i in models),
            set(("hierarchal_app_django.hierarchalappmodel",)),
        )
| AppLoadingTests |
python | getsentry__sentry | src/sentry/replays/lib/storage.py | {
"start": 867,
"end": 2171
} | class ____:
project_id: int
replay_id: str
segment_id: int
retention_days: int | None
date_added: datetime | None = None
file_id: int | None = None
file: File | None = None
def make_recording_filename(segment: RecordingSegmentStorageMeta) -> str:
return _make_recording_filename(
segment.retention_days,
segment.project_id,
segment.replay_id,
segment.segment_id,
)
def make_video_filename(segment: RecordingSegmentStorageMeta) -> str:
return _make_video_filename(
segment.retention_days,
segment.project_id,
segment.replay_id,
segment.segment_id,
)
def _make_recording_filename(
retention_days: int | None,
project_id: int,
replay_id: str,
segment_id: int,
) -> str:
"""Return a recording segment filename."""
return "{}/{}/{}/{}".format(
retention_days or 30,
project_id,
replay_id,
segment_id,
)
def _make_video_filename(
retention_days: int | None,
project_id: int,
replay_id: str,
segment_id: int,
) -> str:
"""Return a recording segment video filename."""
filename = _make_recording_filename(retention_days, project_id, replay_id, segment_id)
return filename + ".video"
| RecordingSegmentStorageMeta |
python | walkccc__LeetCode | solutions/34. Find First and Last Position of Element in Sorted Array/34.py | {
"start": 0,
"end": 237
class Solution:
    def searchRange(self, nums: list[int], target: int) -> list[int]:
        """Return [first, last] indices of target in sorted nums, or [-1, -1].

        Fix: the original returned tuples, contradicting the declared
        list[int] return type; lists are returned now.
        """
        lo = bisect_left(nums, target)
        if lo == len(nums) or nums[lo] != target:
            return [-1, -1]
        return [lo, bisect_right(nums, target) - 1]
| Solution |
python | facebook__pyre-check | client/error.py | {
"start": 686,
"end": 771
class ErrorParsingFailure(Exception):
    # Marker exception raised when error output cannot be parsed.
    pass
@dataclasses.dataclass(frozen=True)
| ErrorParsingFailure |
python | kamyu104__LeetCode-Solutions | Python/minimum-sum-of-values-by-dividing-array.py | {
"start": 2219,
"end": 4546
class Solution2(object):
    def minimumValueSum(self, nums, andValues):
        """
        :type nums: List[int]
        :type andValues: List[int]
        :rtype: int

        Fix: Python-2-only ``xrange`` replaced with ``range`` (backward
        compatible; the algorithm is otherwise unchanged).
        """
        INF = float("inf")

        # RMQ - Sparse Table
        # Template: https://github.com/kamyu104/GoogleCodeJam-Farewell-Rounds/blob/main/Round%20D/genetic_sequences2.py3
        # Time:  ctor:  O(NlogN) * O(fn)
        #        query: O(fn)
        # Space: O(NlogN)
        class SparseTable(object):
            def __init__(self, arr, fn):
                self.fn = fn
                self.bit_length = [0]
                n = len(arr)
                k = n.bit_length() - 1  # log2_floor(n)
                for i in range(k + 1):
                    self.bit_length.extend(
                        i + 1 for _ in range(min(1 << i, (n + 1) - len(self.bit_length)))
                    )
                self.st = [[0] * n for _ in range(k + 1)]
                self.st[0] = arr[:]
                for i in range(1, k + 1):  # Time: O(NlogN) * O(fn)
                    for j in range((n - (1 << i)) + 1):
                        self.st[i][j] = fn(self.st[i - 1][j],
                                           self.st[i - 1][j + (1 << (i - 1))])

            def query(self, L, R):  # Time: O(fn)
                i = self.bit_length[R - L + 1] - 1  # log2_floor(R-L+1)
                return self.fn(self.st[i][L], self.st[i][R - (1 << i) + 1])

        dp = [INF] * (len(nums) + 1)
        dp[0] = 0
        for j in range(len(andValues)):
            new_dp = [INF] * (len(nums) + 1)
            masks = []
            st = SparseTable(dp, min)
            for i in range(j, len(nums)):
                masks.append([nums[i], i])
                for x in masks:
                    x[0] &= nums[i]
                # keep one (leftmost) representative per distinct AND value
                masks = [x for k, x in enumerate(masks)
                         if k == 0 or masks[k - 1][0] != masks[k][0]]
                for k, (mask, left) in enumerate(masks):
                    if mask == andValues[j]:
                        # any start in range(left, right+1) has the same
                        # and(nums[start:i+1]) == mask
                        right = masks[k + 1][1] - 1 if k + 1 != len(masks) else i
                        new_dp[i + 1] = min(new_dp[i + 1], st.query(left, right) + nums[i])
                        break
            dp = new_dp
        return dp[-1] if dp[-1] != INF else -1
# Time: O(n * m * logr), r = max(nums)
# Space: O(n * m * logr)
import collections
# memoization
| Solution2 |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_number_of_decimal_places_to_equal.py | {
"start": 632,
"end": 2298
class ColumnValuesDecimalPlacesEquals(ColumnMapMetricProvider):
    """
    Computes number of decimal places of values in column through string conversion. In the case of an integer, the
    value automatically passes.
    """

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.decimal_places_equal"
    condition_value_keys = ("decimal_places",)

    # This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, decimal_places, **kwargs):
        def decimal_func(x):
            # Integral values pass automatically; non-numeric values fall
            # through to the string-based count below.
            try:
                if x == int(x):
                    return decimal_places
            except Exception:
                pass
            return len(str(x).split(".")[1])

        column_decimal_places = column.apply(decimal_func)
        return column_decimal_places == decimal_places

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     return column.in_([3])

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     return column.isin([3])
| ColumnValuesDecimalPlacesEquals |
python | jazzband__django-oauth-toolkit | tests/test_authorization_code.py | {
"start": 79018,
"end": 81334
class TestOIDCAuthorizationCodeProtectedResource(BaseTest):
    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.application.algorithm = Application.RS256_ALGORITHM
        cls.application.save()

    def test_id_token_resource_access_allowed(self):
        self.client.login(username="test_user", password="123456")

        # retrieve a valid authorization code
        authcode_data = {
            "client_id": self.application.client_id,
            "state": "random_state_string",
            "scope": "openid",
            "redirect_uri": "http://example.org",
            "response_type": "code",
            "allow": True,
        }
        response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
        query_dict = parse_qs(urlparse(response["Location"]).query)
        authorization_code = query_dict["code"].pop()

        # exchange authorization code for a valid access token
        token_request_data = {
            "grant_type": "authorization_code",
            "code": authorization_code,
            "redirect_uri": "http://example.org",
        }
        auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
        response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
        content = json.loads(response.content.decode("utf-8"))
        access_token = content["access_token"]
        id_token = content["id_token"]

        # use token to access the resource
        auth_headers = {
            "HTTP_AUTHORIZATION": "Bearer " + access_token,
        }
        request = self.factory.get("/fake-resource", **auth_headers)
        request.user = self.test_user

        view = ResourceView.as_view()
        response = view(request)
        self.assertEqual(response, "This is a protected resource")

        # use id_token to access the resource
        auth_headers = {
            "HTTP_AUTHORIZATION": "Bearer " + id_token,
        }
        request = self.factory.get("/fake-resource", **auth_headers)
        request.user = self.test_user

        view = ResourceView.as_view()
        response = view(request)
        self.assertEqual(response, "This is a protected resource")
@pytest.mark.oauth2_settings(presets.DEFAULT_SCOPES_RO)
| TestOIDCAuthorizationCodeProtectedResource |
python | doocs__leetcode | lcof2/剑指 Offer II 008. 和大于等于 target 的最短子数组/Solution.py | {
"start": 0,
"end": 340
class Solution:
    def minSubArrayLen(self, target: int, nums: List[int]) -> int:
        """Sliding window: length of the shortest contiguous subarray with
        sum >= target, or 0 if none exists."""
        ans = inf
        s = i = 0
        for j, x in enumerate(nums):
            s += x
            # shrink from the left while the window still satisfies the target
            while s >= target:
                ans = min(ans, j - i + 1)
                s -= nums[i]
                i += 1
        return 0 if ans == inf else ans
| Solution |
python | getsentry__sentry | src/sentry/utils/services.py | {
"start": 14414,
"end": 18521
class ServiceDelegator(Delegator, Service):
    """\
    The backends are provided as mapping of backend name to configuration
    parameters:

        'redis': {
            'path': 'sentry.tsdb.redis.RedisTSDB',
            'executor': {
                'path': 'sentry.utils.services.ThreadedExecutor',
                'options': {
                    'worker_count': 1,
                },
            },
        },
        'dummy': {
            'path': 'sentry.tsdb.dummy.DummyTSDB',
            'executor': {
                'path': 'sentry.utils.services.ThreadedExecutor',
                'options': {
                    'worker_count': 4,
                },
            },
        },
        # ... etc ...

    The selector function and callback function can be provided as either:

    - A dotted import path string (``path.to.callable``) that will be
      imported at backend instantiation, or
    - A reference to a callable object.

    If you're shifting a service from one backend storage system to another
    consider using `make_writebehind_selector` to generate your selector function.
    """

    def __init__(
        self,
        backend_base: str,
        backends: Mapping[str, ServiceOptions],
        selector_func: str | Selector,
        callback_func: str | Callback | None = None,
    ):
        super().__init__(
            import_string(backend_base),
            {
                name: (
                    build_instance_from_options_of_type(Service, options),
                    build_instance_from_options_of_type(
                        Executor, options.get("executor", {}), default_constructor=ThreadedExecutor
                    ),
                )
                for name, options in backends.items()
            },
            resolve_callable(selector_func),
            resolve_callable(callback_func) if callback_func is not None else None,
        )

    def validate(self) -> None:
        for backend, executor in self.backends.values():
            backend.validate()

    def setup(self) -> None:
        for backend, executor in self.backends.values():
            backend.setup()
KeyFetch = Callable[[Context, str, Mapping[str, Any]], str | int]


def make_writebehind_selector(
    *, option_name: str, key_fetch: KeyFetch, move_to: str, move_from: str
) -> Selector:
    """
    Generates a selector_func that will do write-behind delegation

    The provided option_name is expected to have values between -1 and 1

    -1.0 - 0.01 The move_from will be primary, while move_to will increasingly be added as a secondary.
    At 0.0 - Only move_from will be used.
    0.01 - 1.0 The move_to will increasingly be used as primary.

    The `key_fetch` function gets the parameters expected by `Selector` and
    is expected to return a consistent str|int that will be hashed for consistent
    rollouts. If no consistent key exists you can use random number generation.

    The `move_to` and `move_from` parameters should match the keys used to defined
    the backends in the `ServiceDelegator` configuration.

    Example:

        selector = make_writebehind_selector(
            option_name="feature.rollout",
            move_to="new",
            move_from="old",
            key_fetch=lambda *args: "a-consistent-key",
        )
    """

    def selector(context: Context, method: str, callargs: Mapping[str, Any]) -> list[str]:
        rollout_rate = options.get(option_name)
        if rollout_rate == 0.0:
            return [move_from]

        key = key_fetch(context, method, callargs)
        if isinstance(key, str):
            # md5 here is for stable bucketing only, not security.
            intkey = int(hashlib.md5(key.encode("utf8")).hexdigest(), base=16)
        else:
            intkey = key

        assert isinstance(intkey, int), intkey
        if rollout_rate < 0:
            if (intkey % 10000) / 10000 < rollout_rate * -1.0:
                return [move_from, move_to]
            return [move_from]
        else:
            # rollout > 0
            if (intkey % 10000) / 10000 < rollout_rate:
                return [move_to, move_from]
            return [move_from, move_to]

    return selector
| ServiceDelegator |
python | google__jax | jax/_src/core.py | {
"start": 48965,
"end": 57911
class AddExplicitMeshAxisNamesContextManager:
    """Context manager that temporarily adds explicit mesh axis names to the
    ambient axis environment; the previous environment is restored on exit."""

    __slots__ = ['prev', 'axis_names']

    def __init__(self, axis_names: AxisName | None):
        self.axis_names = axis_names

    def __enter__(self):
        self.prev = trace_ctx.axis_env
        if self.axis_names is not None:
            trace_ctx.set_axis_env(self.prev.add_explicit_mesh_axis_names(
                self.axis_names))

    def __exit__(self, exc_type, exc_value, traceback):
        trace_ctx.set_axis_env(self.prev)


add_explicit_mesh_axis_names = AddExplicitMeshAxisNamesContextManager
def get_axis_env():
    """Return the current ambient axis environment."""
    return trace_ctx.axis_env


def _initialize_jax_jit_thread_local_state():
    """Initializes the C++ thread-local context.

    When the user spawns threads, the C++ `jax_jit.thread_local_state` is None.
    The C++ accessor calls this function if it realizes the thread_local_state
    is None (which means it's not yet initialized for this thread).

    This function does not live in `config.py`, to prevent circular imports.
    """
    trace_ctx.update_thread_local_jit_state()


jax_jit.set_thread_local_state_initialization_callback(
    _initialize_jax_jit_thread_local_state)


def trace_state_clean() -> bool:
    """Return True if no trace is active (top-level trace state)."""
    return trace_ctx.is_top_level()


def reset_trace_state() -> bool:
    """Resets the global trace state and returns True if it was already clean."""
    if not trace_ctx.is_top_level():
        trace_ctx.reset()
        trace_ctx.update_thread_local_jit_state()
        return False
    else:
        return True


TRACER_LEAK_DEBUGGER_WARNING = """\
JAX check_tracer_leaks behavior can trigger false positives when used with a debugger.
To avoid false positives and silence this warning, you can disable thread tracing using
the following:

  import threading
  threading.current_thread().pydev_do_not_trace = True
"""
@contextmanager
def ensure_no_leaks(trace: Trace):
    """Invalidate `trace` after the block and, when leak checking is enabled,
    raise if any tracer still holds a reference to it."""
    yield
    trace.invalidate()
    if config.check_tracer_leaks.value:
        trace_ref = trace._weakref
        del trace  # drop our own strong reference before checking liveness
        live_trace = trace_ref()
        if live_trace is not None:
            leaked_tracers = maybe_find_leaked_tracers(live_trace)
            if leaked_tracers:
                raise leaked_tracer_error("trace", live_trace, leaked_tracers)


def maybe_find_leaked_tracers(trace: Trace) -> list[Tracer]:
    """Find the leaked tracers holding a reference to the Trace"""
    if not getattr(threading.current_thread(), 'pydev_do_not_trace', True):
        warnings.warn(TRACER_LEAK_DEBUGGER_WARNING)
    # Trigger garbage collection to filter out unreachable objects that are alive
    # only due to cyclical dependencies. (We don't care about unreachable leaked
    # tracers since they can't interact with user code and cause a problem.)
    gc.collect()
    tracers = list(filter(lambda x: isinstance(x, Tracer), gc.get_referrers(trace)))
    return tracers


def leaked_tracer_error(name: str, t, tracers: list[Tracer]) -> Exception:
    """Build (but do not raise) an exception describing the leaked tracers."""
    assert tracers
    why = partial(_why_alive, {id(tracers)})
    msgs = '\n\n'.join(f'{tracers[i]}{tracers[i]._origin_msg()}{why(tracers[i])}'
                       for i in range(len(tracers)))
    return Exception(f'Leaked {name} {t}. Leaked tracer(s):\n\n{msgs}\n')
def _why_alive(ignore_ids: set[int], x: Any) -> str:
    """Walk referrer chains from `x` and render a human-readable explanation
    of why it is still alive (used in tracer-leak error messages)."""
    parents = lambda x: [r for r in gc.get_referrers(x) if id(r) not in ignore_ids]
    child, lines, seen = x, [], set()
    while (id(child) not in seen and type(child) is not types.ModuleType
           and parents(child)):
        parent = parents(child)[0]  # just pick one parent

        # For namespaces (like modules and class instances) and closures, the
        # references may form a simple chain: e.g. instance refers to its own
        # __dict__ which refers to child, or function refers to its __closure__
        # which refers to cells which refer to child. In these cases, we can provide
        # a more intuitive description by collapsing the chain into a single
        # parent->child jump. We do that by setting `parent` here to be a
        # grandparent (or great-grandparent) of `child`, and then handling that case
        # in _why_alive_container_info. See example:
        #  https://github.com/jax-ml/jax/pull/13022#discussion_r1008456599
        # To prevent this collapsing behavior, just comment out this code block.
        if (isinstance(parent, dict) and
                getattr(parents(parent)[0], '__dict__', None) is parents(child)[0]):
            parent = parents(parent)[0]
        elif type(parent) is types.CellType:
            parent = parents(parents(parent)[0])[0]

        line = f'<{type(child).__name__} {id(child)}> is referred to by '
        lines.append(line + _why_alive_container_info(parent, id(child)))
        seen.add(id(child))
        child = parent
    return '\n' + '\n'.join(lines) if lines else ''


def _why_alive_container_info(container, obj_id) -> str:
    """Describe where `obj_id` lives inside `container` (attribute, closure
    variable, index, or key)."""
    name = f'<{type(container).__name__} {id(container)}>'
    if type(container) is types.ModuleType:
        name = getattr(container, '__name__', name)
    if type(container) is types.FunctionType:
        name_ = getattr(container, '__name__', '<no-name>')
        closure = inspect.getclosurevars(container)
        keys = [k for k, v in dict(closure.nonlocals, **closure.globals).items()
                if id(v) == obj_id]
        if len(keys) == 1: return f'{name} ({name_}) closed-over variable {keys[0]}'
        elif len(keys) > 1: return (f'{name} in closed-over variables ' +
                                    ', '.join(map(repr, keys)))
    if hasattr(container, '__dict__'):
        keys = [k for k in vars(container) if id(vars(container)[k]) == obj_id]
        if len(keys) == 1: return f'{name}.{keys[0]}'
        elif len(keys) > 1: return f'{name} in vars ' + ', '.join(map(repr, keys))
    if isinstance(container, (list, tuple)):
        idxs = [i for i, x in enumerate(container) if id(x) == obj_id]
        if len(idxs) == 1: return f'{name}[{idxs[0]}]'
        else: return f'{name} at indices ' + ', '.join(map(str, idxs))
    if isinstance(container, dict):
        keys = [k for k in container if id(container[k]) == obj_id]
        if len(keys) == 1: return f'{name}[{keys[0]!r}]'
        else: return f'{name} at keys ' + ', '.join(map(repr, keys))
    if isinstance(container, types.ModuleType):
        return f' named {container.__name__}'
    return name
@contextmanager
def ensure_compile_time_eval():
    """Context manager to ensure evaluation at trace/compile time (or error).

    Some JAX APIs like :func:`jax.jit` and :func:`jax.lax.scan` involve staging,
    i.e., delaying the evaluation of numerical expressions (like :mod:`jax.numpy`
    function applications) so that instead of performing those computations
    eagerly while evaluating the corresponding Python expressions, their
    computation is carried out separately, e.g. after optimized compilation. But
    this delay can be undesirable. For example, numerical values might be needed
    to evaluate Python control flow and so their evaluation cannot be delayed. As
    another example, it may be beneficial to ensure compile time evaluation (or
    "constant folding") for performance reasons.

    This context manager ensures that JAX computations are evaluated eagerly. If
    eager evaluation is not possible, a ``ConcretizationTypeError`` is raised.

    Here's a contrived example::

      import jax
      import jax.numpy as jnp

      @jax.jit
      def f(x):
        with jax.ensure_compile_time_eval():
          y = jnp.sin(3.0)
          z = jnp.sin(y)
          z_positive = z > 0
        if z_positive:  # z_positive is usable in Python control flow
          return jnp.sin(x)
        else:
          return jnp.cos(x)

    Here's a real-world example from https://github.com/jax-ml/jax/issues/3974::

      import jax
      import jax.numpy as jnp
      from jax import random

      @jax.jit
      def jax_fn(x):
        with jax.ensure_compile_time_eval():
          y = random.randint(random.key(0), (1000,1000), 0, 100)
        y2 = y @ y
        x2 = jnp.sum(y2) * x
        return x2

    A similar behavior can often be achieved simply by 'hoisting' the constant
    expression out of the corresponding staging API::

      y = random.randint(random.key(0), (1000,1000), 0, 100)

      @jax.jit
      def jax_fn(x):
        y2 = y @ y
        x2 = jnp.sum(y2)*x
        return x2

    But in some cases it can be more convenient to use this context manager.
    """
    with config.eager_constant_folding(True):
        yield
@contextmanager
def eval_context():
    """Run the enclosed code under the eager evaluation trace."""
    with set_current_trace(eval_trace):
        yield


# TODO(dougalm): deprecate/delete
def full_lower(val):
    if isinstance(val, Tracer):
        return val.full_lower()
    else:
        return val


def get_referent(x: Any) -> Any:
    """Return the object a tracer refers to, or `x` itself otherwise."""
    return x.get_referent() if isinstance(x, Tracer) else x


def same_referent(x: Any, y: Any) -> bool:
    return get_referent(x) is get_referent(y)


def dedup_referents(itr: Iterable[Any]) -> list[Any]:
    # Deduplicate by referent identity while preserving the last occurrence.
    return list({HashableWrapper(get_referent(x)): x for x in itr}.values())


def definitely_equal(x, y):
    """Conservative equality: False when equality cannot be decided."""
    if isinstance(x, Tracer) or isinstance(y, Tracer):
        return same_referent(x, y)
    elif x is y:
        return True
    try:
        return x == y
    except InconclusiveDimensionOperation:
        return False
# -------------------- abstract values --------------------
| AddExplicitMeshAxisNamesContextManager |
python | pypa__setuptools | setuptools/_distutils/command/install_lib.py | {
"start": 369,
"end": 8588
class install_lib(Command):
    description = "install all Python modules (extensions and pure Python)"

    # The byte-compilation options are a tad confusing.  Here are the
    # possible scenarios:
    #   1) no compilation at all (--no-compile --no-optimize)
    #   2) compile .pyc only (--compile --no-optimize; default)
    #   3) compile .pyc and "opt-1" .pyc (--compile --optimize)
    #   4) compile "opt-1" .pyc only (--no-compile --optimize)
    #   5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
    #   6) compile "opt-2" .pyc only (--no-compile --optimize-more)
    #
    # The UI for this is two options, 'compile' and 'optimize'.
    # 'compile' is strictly boolean, and only decides whether to
    # generate .pyc files.  'optimize' is three-way (0, 1, or 2), and
    # decides both whether to generate .pyc files and what level of
    # optimization to use.

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
        ('build-dir=', 'b', "build directory (where to install from)"),
        ('force', 'f', "force installation (overwrite existing files)"),
        ('compile', 'c', "compile .py to .pyc [default]"),
        ('no-compile', None, "don't compile .py files"),
        (
            'optimize=',
            'O',
            "also compile with optimization: -O1 for \"python -O\", "
            "-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
        ),
        ('skip-build', None, "skip the build steps"),
    ]

    boolean_options: ClassVar[list[str]] = ['force', 'compile', 'skip-build']
    negative_opt: ClassVar[dict[str, str]] = {'no-compile': 'compile'}

    def initialize_options(self):
        # let the 'install' command dictate our installation directory
        self.install_dir = None
        self.build_dir = None
        self.force = False
        self.compile = None
        self.optimize = None
        self.skip_build = None

    def finalize_options(self) -> None:
        # Get all the information we need to install pure Python modules
        # from the umbrella 'install' command -- build (source) directory,
        # install (target) directory, and whether to compile .py files.
        self.set_undefined_options(
            'install',
            ('build_lib', 'build_dir'),
            ('install_lib', 'install_dir'),
            ('force', 'force'),
            ('compile', 'compile'),
            ('optimize', 'optimize'),
            ('skip_build', 'skip_build'),
        )

        if self.compile is None:
            self.compile = True
        if self.optimize is None:
            self.optimize = False

        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
            except ValueError:
                pass
            if self.optimize not in (0, 1, 2):
                raise DistutilsOptionError("optimize must be 0, 1, or 2")

    def run(self) -> None:
        # Make sure we have built everything we need first
        self.build()

        # Install everything: simply dump the entire contents of the build
        # directory to the installation directory (that's the beauty of
        # having a build directory!)
        outfiles = self.install()

        # (Optionally) compile .py to .pyc
        if outfiles is not None and self.distribution.has_pure_modules():
            self.byte_compile(outfiles)

    # -- Top-level worker functions ------------------------------------
    # (called from 'run()')

    def build(self) -> None:
        if not self.skip_build:
            if self.distribution.has_pure_modules():
                self.run_command('build_py')
            if self.distribution.has_ext_modules():
                self.run_command('build_ext')

    # Any: https://typing.readthedocs.io/en/latest/guides/writing_stubs.html#the-any-trick
    def install(self) -> list[str] | Any:
        if os.path.isdir(self.build_dir):
            outfiles = self.copy_tree(self.build_dir, self.install_dir)
        else:
            self.warn(
                f"'{self.build_dir}' does not exist -- no Python modules to install"
            )
            return
        return outfiles

    def byte_compile(self, files) -> None:
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return

        from ..util import byte_compile

        # Get the "--root" directory supplied to the "install" command,
        # and use it as a prefix to strip off the purported filename
        # encoded in bytecode files.  This is far from complete, but it
        # should at least generate usable bytecode in RPM distributions.
        install_root = self.get_finalized_command('install').root

        if self.compile:
            byte_compile(
                files,
                optimize=0,
                force=self.force,
                prefix=install_root,
                dry_run=self.dry_run,
            )
        if self.optimize > 0:
            byte_compile(
                files,
                optimize=self.optimize,
                force=self.force,
                prefix=install_root,
                verbose=self.verbose,
                dry_run=self.dry_run,
            )

    # -- Utility methods -----------------------------------------------

    def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
        if not has_any:
            return []

        build_cmd = self.get_finalized_command(build_cmd)
        build_files = build_cmd.get_outputs()
        build_dir = getattr(build_cmd, cmd_option)

        prefix_len = len(build_dir) + len(os.sep)
        outputs = [os.path.join(output_dir, file[prefix_len:]) for file in build_files]

        return outputs

    def _bytecode_filenames(self, py_filenames):
        bytecode_files = []
        for py_file in py_filenames:
            # Since build_py handles package data installation, the
            # list of outputs can contain more than just .py files.
            # Make sure we only report bytecode for the .py files.
            ext = os.path.splitext(os.path.normcase(py_file))[1]
            if ext != PYTHON_SOURCE_EXTENSION:
                continue
            if self.compile:
                bytecode_files.append(
                    importlib.util.cache_from_source(py_file, optimization='')
                )
            if self.optimize > 0:
                bytecode_files.append(
                    importlib.util.cache_from_source(
                        py_file, optimization=self.optimize
                    )
                )

        return bytecode_files

    # -- External interface --------------------------------------------
    # (called by outsiders)

    def get_outputs(self):
        """Return the list of files that would be installed if this command
        were actually run.  Not affected by the "dry-run" flag or whether
        modules have actually been built yet.
        """
        pure_outputs = self._mutate_outputs(
            self.distribution.has_pure_modules(),
            'build_py',
            'build_lib',
            self.install_dir,
        )
        if self.compile:
            bytecode_outputs = self._bytecode_filenames(pure_outputs)
        else:
            bytecode_outputs = []
        ext_outputs = self._mutate_outputs(
            self.distribution.has_ext_modules(),
            'build_ext',
            'build_lib',
            self.install_dir,
        )
        return pure_outputs + bytecode_outputs + ext_outputs
def get_inputs(self):
"""Get the list of files that are input to this command, ie. the
files that get installed as they are named in the build tree.
The files in this list correspond one-to-one to the output
filenames returned by 'get_outputs()'.
"""
inputs = []
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
inputs.extend(build_py.get_outputs())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
inputs.extend(build_ext.get_outputs())
return inputs
| install_lib |
python | RaRe-Technologies__gensim | gensim/similarities/docsim.py | {
"start": 3869,
"end": 8988
} | class ____(utils.SaveLoad):
"""A proxy that represents a single shard instance within :class:`~gensim.similarity.docsim.Similarity` index.
Basically just wraps :class:`~gensim.similarities.docsim.MatrixSimilarity`,
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity`, etc, so that it mmaps from disk on request (query).
"""
def __init__(self, fname, index):
"""
Parameters
----------
fname : str
Path to top-level directory (file) to traverse for corpus documents.
index : :class:`~gensim.interfaces.SimilarityABC`
Index object.
"""
self.dirname, self.fname = os.path.split(fname)
self.length = len(index)
self.cls = index.__class__
logger.info("saving index shard to %s", self.fullname())
index.save(self.fullname())
self.index = self.get_index()
def fullname(self):
"""Get full path to shard file.
Return
------
str
Path to shard instance.
"""
return os.path.join(self.dirname, self.fname)
def __len__(self):
"""Get length."""
return self.length
def __getstate__(self):
"""Special handler for pickle.
Returns
-------
dict
Object that contains state of current instance without `index`.
"""
result = self.__dict__.copy()
# (S)MS objects must be loaded via load() because of mmap (simple pickle.load won't do)
if 'index' in result:
del result['index']
return result
def __str__(self):
return "%s<%i documents in %s>" % (self.cls.__name__, len(self), self.fullname())
def get_index(self):
"""Load & get index.
Returns
-------
:class:`~gensim.interfaces.SimilarityABC`
Index instance.
"""
if not hasattr(self, 'index'):
logger.debug("mmaping index from %s", self.fullname())
self.index = self.cls.load(self.fullname(), mmap='r')
return self.index
def get_document_id(self, pos):
"""Get index vector at position `pos`.
Parameters
----------
pos : int
Vector position.
Return
------
{:class:`scipy.sparse.csr_matrix`, :class:`numpy.ndarray`}
Index vector. Type depends on underlying index.
Notes
-----
The vector is of the same type as the underlying index (ie., dense for
:class:`~gensim.similarities.docsim.MatrixSimilarity`
and scipy.sparse for :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
"""
assert 0 <= pos < len(self), "requested position out of range"
return self.get_index().index[pos]
def __getitem__(self, query):
"""Get similarities of document (or corpus) `query` to all documents in the corpus.
Parameters
----------
query : {iterable of list of (int, number) , list of (int, number))}
Document or corpus.
Returns
-------
:class:`numpy.ndarray`
Similarities of document/corpus if index is :class:`~gensim.similarities.docsim.MatrixSimilarity` **or**
:class:`scipy.sparse.csr_matrix`
for case if index is :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
"""
index = self.get_index()
try:
index.num_best = self.num_best
index.normalize = self.normalize
except Exception:
raise ValueError("num_best and normalize have to be set before querying a proxy Shard object")
return index[query]
def query_shard(args):
"""Helper for request query from shard, same as shard[query].
Parameters
---------
args : (list of (int, number), :class:`~gensim.interfaces.SimilarityABC`)
Query and Shard instances
Returns
-------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the query against documents indexed in this shard.
"""
query, shard = args # simulate starmap (not part of multiprocessing in older Pythons)
logger.debug("querying shard %s num_best=%s in process %s", shard, shard.num_best, os.getpid())
result = shard[query]
logger.debug("finished querying shard %s in process %s", shard, os.getpid())
return result
def _nlargest(n, iterable):
"""Helper for extracting n documents with maximum similarity.
Parameters
----------
n : int
Number of elements to be extracted
iterable : iterable of list of (int, float)
Iterable containing documents with computed similarities
Returns
-------
:class:`list`
List with the n largest elements from the dataset defined by iterable.
Notes
-----
Elements are compared by the absolute value of similarity, because negative value of similarity
does not mean some form of dissimilarity.
"""
return heapq.nlargest(n, itertools.chain(*iterable), key=lambda item: abs(item[1]))
| Shard |
python | jazzband__django-waffle | waffle/tests/test_waffle.py | {
"start": 32394,
"end": 33570
} | class ____(TestCase):
databases = DATABASES
def test_is_active_for_user_respects_everyone_on(self):
"""
Test flag.is_active_for_user returns truthy value when everyone is set to True overriding all other settings.
"""
flag = waffle.get_waffle_flag_model().objects.create(
name="feature_flag_name",
staff=False,
everyone=True,
)
staff_user = get_user_model()(
id=999,
username="foo",
is_staff=True,
)
self.assertTrue(flag.is_active_for_user(staff_user))
def test_is_active_for_user_respects_everyone_off(self):
"""
Test flag.is_active_for_user returns falsy value when everyone is set to False overriding all other settings.
"""
flag = waffle.get_waffle_flag_model().objects.create(
name="feature_flag_name",
staff=True,
everyone=False,
)
staff_user = get_user_model()(
id=999,
username="foo",
is_staff=True,
)
self.assertFalse(flag.is_active_for_user(staff_user))
| WaffleFlagEveryoneSettingTests |
python | getsentry__sentry | src/sentry/monitors/migrations/0008_fix_processing_error_keys.py | {
"start": 2473,
"end": 2588
} | class ____(TypedDict):
id: str
checkin: Any
errors: Sequence[Any]
@dataclass()
| CheckinProcessingErrorData |
python | huggingface__transformers | tests/models/glpn/test_image_processing_glpn.py | {
"start": 3586,
"end": 9909
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = GLPNImageProcessor if is_vision_available() else None
fast_image_processing_class = GLPNImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = GLPNImageProcessingTester(self)
self.image_processor_dict = self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
def test_call_pil(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_numpy_4_channels(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processing_class.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
self.image_processing_class.num_channels = 3
# override as glpn image processors don't support heterogeneous batching
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
def test_post_process_depth_equivalence(self):
# Check that both processors produce equivalent post-processed depth maps
if self.fast_image_processing_class is None:
self.skipTest("TorchVision not available")
outputs = self.image_processor_tester.prepare_depth_outputs()
slow = self.image_processing_class(**self.image_processor_dict)
fast = self.fast_image_processing_class(**self.image_processor_dict)
# target_sizes simulate resized inference outputs
target_sizes = [(240, 320)] * self.image_processor_tester.batch_size
processed_slow = slow.post_process_depth_estimation(outputs, target_sizes=target_sizes)
processed_fast = fast.post_process_depth_estimation(outputs, target_sizes=target_sizes)
# Compare per-sample predicted depth tensors
for pred_slow, pred_fast in zip(processed_slow, processed_fast):
depth_slow = pred_slow["predicted_depth"]
depth_fast = pred_fast["predicted_depth"]
torch.testing.assert_close(depth_fast, depth_slow, atol=1e-1, rtol=1e-3)
self.assertLessEqual(torch.mean(torch.abs(depth_fast.float() - depth_slow.float())).item(), 5e-3)
| GLPNImageProcessingTest |
python | huggingface__transformers | src/transformers/models/roc_bert/modeling_roc_bert.py | {
"start": 18723,
"end": 19419
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RoCBert
| RoCBertOutput |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 25693,
"end": 26002
} | class ____(graphene.Union):
"""The output from reloading the workspace."""
class Meta:
types = (
GrapheneWorkspace,
GrapheneUnauthorizedError,
GraphenePythonError,
)
name = "ReloadWorkspaceMutationResult"
| GrapheneReloadWorkspaceMutationResult |
python | wandb__wandb | wandb/sdk/lib/run_moment.py | {
"start": 133,
"end": 2419
} | class ____:
"""A moment in a run.
Defines a branching point in a finished run to fork or resume from.
A run moment is identified by a run ID and a metric value.
Currently, only the metric '_step' is supported.
"""
run: str
"""run ID"""
value: Union[int, float]
"""Value of the metric."""
metric: str = "_step"
"""Metric to use to determine the moment in the run.
Currently, only the metric '_step' is supported.
In future, this will be relaxed to be any metric.
"""
def __post_init__(self):
if self.metric != "_step":
raise ValueError(
f"Only the metric '_step' is supported, got '{self.metric}'."
)
if not isinstance(self.value, (int, float)):
raise TypeError(
f"Only int or float values are supported, got '{self.value}'."
)
if not isinstance(self.run, str):
raise TypeError(f"Only string run names are supported, got '{self.run}'.")
@classmethod
def from_uri(cls, uri: str) -> RunMoment:
parsable = "runmoment://" + uri
parse_err = ValueError(
f"Could not parse passed run moment string '{uri}', "
f"expected format '<run>?<metric>=<numeric_value>'. "
f"Currently, only the metric '_step' is supported. "
f"Example: 'ans3bsax?_step=123'."
)
try:
parsed = parse.urlparse(parsable)
except ValueError as e:
raise parse_err from e
if parsed.scheme != "runmoment":
raise parse_err
# extract run, metric, value from parsed
if not parsed.netloc:
raise parse_err
run = parsed.netloc
if parsed.path or parsed.params or parsed.fragment:
raise parse_err
query = parse.parse_qs(parsed.query)
if len(query) != 1:
raise parse_err
metric = list(query.keys())[0]
if metric != "_step":
raise parse_err
value: str = query[metric][0]
try:
num_value = int(value) if value.isdigit() else float(value)
except ValueError as e:
raise parse_err from e
return cls(run=run, metric=metric, value=num_value)
| RunMoment |
python | Delgan__loguru | loguru/_colorizer.py | {
"start": 10733,
"end": 11590
} | class ____:
def __init__(self, tokens, messages_color_tokens):
self._tokens = tokens
self._messages_color_tokens = messages_color_tokens
def strip(self):
return AnsiParser.strip(self._tokens)
def colorize(self, ansi_level):
return AnsiParser.colorize(self._tokens, ansi_level)
def make_coloring_message(self, message, *, ansi_level, colored_message):
messages = [
(
message
if color_tokens is None
else AnsiParser.wrap(
colored_message.tokens, ansi_level=ansi_level, color_tokens=color_tokens
)
)
for color_tokens in self._messages_color_tokens
]
coloring = ColoringMessage(message)
coloring._messages = iter(messages)
return coloring
| ColoredFormat |
python | vyperlang__vyper | tests/hevm.py | {
"start": 734,
"end": 4846
} | class ____:
num_calldataloads = 0
visited: set
def __init__(self):
self.visited = set()
def _prep_hevm_venom_ctx(ctx, verbose=False):
visitor = _FunctionVisitor()
_prep_hevm_venom_fn(ctx.entry_function, visitor)
compiler = VenomCompiler(ctx)
asm = compiler.generate_evm_assembly(no_optimize=False)
return assembly_to_evm(asm)[0].hex()
def _prep_hevm_venom_fn(fn, visitor):
ac = IRAnalysesCache(fn)
fcg = ac.force_analysis(FCGAnalysis)
if fn in visitor.visited:
return
visitor.visited.add(fn)
for next_fn in fcg.get_callees(fn):
_prep_hevm_venom_fn(next_fn, visitor)
for bb in fn.get_basic_blocks():
for inst in bb.instructions:
# transform `source` instructions into "symbolic" values for
# hevm via calldataload
if inst.opcode == "source":
# hevm limit: 256 bytes of symbolic calldata
assert visitor.num_calldataloads < 8
inst.opcode = "calldataload"
inst.operands = [IRLiteral(visitor.num_calldataloads * 32)]
visitor.num_calldataloads += 1
term = bb.instructions[-1]
# test convention, terminate by `sink`ing the variables
# you want to check
if term.opcode != "sink":
continue
# testing convention: first 256 bytes can be symbolically filled
# with calldata
RETURN_START = 256
num_return_values = 0
for op in term.operands:
ptr = IRLiteral(RETURN_START + num_return_values * 32)
new_inst = IRInstruction("mstore", [op, ptr])
bb.insert_instruction(new_inst, index=-1)
num_return_values += 1
# return 0, 32 * num_variables
term.opcode = "return"
term.operands = [IRLiteral(num_return_values * 32), IRLiteral(RETURN_START)]
# required for venom_to_assembly right now but should be removed
SimplifyCFGPass(ac, fn).run_pass()
# requirements for venom_to_assembly
LowerDloadPass(ac, fn).run_pass()
ConcretizeMemLocPass(ac, fn).run_pass()
SingleUseExpansion(ac, fn).run_pass()
CFGNormalization(ac, fn).run_pass()
def hevm_check_venom(pre, post, verbose=False):
if not has_hevm():
return
# perform hevm equivalence check
if verbose:
print("HEVM COMPARE.")
print("BEFORE:", pre)
print("OPTIMIZED:", post)
bytecode1 = _prep_hevm_venom(pre, verbose=verbose)
bytecode2 = _prep_hevm_venom(post, verbose=verbose)
hevm_check_bytecode(bytecode1, bytecode2, verbose=verbose)
def hevm_check_venom_ctx(pre, post, verbose=False):
if not has_hevm():
return
# perform hevm equivalence check
if verbose:
print("HEVM COMPARE.")
print("BEFORE:", pre)
print("OPTIMIZED:", post)
bytecode1 = _prep_hevm_venom_ctx(pre, verbose=verbose)
bytecode2 = _prep_hevm_venom_ctx(post, verbose=verbose)
hevm_check_bytecode(bytecode1, bytecode2, verbose=verbose)
@contextlib.contextmanager
def hevm_raises():
if not has_hevm():
pytest.skip("skipping because `--hevm` was not specified")
with pytest.raises(subprocess.CalledProcessError) as e:
yield e
# use hevm to check equality between two bytecodes (hex)
def hevm_check_bytecode(bytecode1, bytecode2, verbose=False, addl_args: list = None):
# debug:
if verbose:
print("RUN HEVM:")
print(bytecode1)
print(bytecode2)
subp_args = ["hevm", "equivalence", "--code-a", bytecode1, "--code-b", bytecode2]
subp_args.extend(["--num-solvers", "1"])
if addl_args:
subp_args.extend([*addl_args])
res = subprocess.run(
subp_args, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
assert not res.stderr, res.stderr # hevm does not print to stderr
# TODO: get hevm team to provide a way to promote warnings to errors
assert "WARNING" not in res.stdout, res.stdout
assert "issues" not in res.stdout
if verbose:
print(res.stdout)
| _FunctionVisitor |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 100399,
"end": 103524
} | class ____(DataplexCatalogBaseOperator):
"""
Get an EntryGroup resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogGetEntryGroupOperator`
:param entry_group_id: Required. EntryGroup identifier.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"entry_group_id"} | set(DataplexCatalogBaseOperator.template_fields)
)
operator_extra_links = (DataplexCatalogEntryGroupLink(),)
def __init__(
self,
entry_group_id: str,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_group_id = entry_group_id
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"entry_group_id": self.entry_group_id,
}
def execute(self, context: Context):
DataplexCatalogEntryGroupLink.persist(context=context)
self.log.info(
"Retrieving Dataplex Catalog EntryGroup %s.",
self.entry_group_id,
)
try:
entry_group = self.hook.get_entry_group(
entry_group_id=self.entry_group_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info(
"Dataplex Catalog EntryGroup %s not found.",
self.entry_group_id,
)
raise AirflowException(NotFound)
except Exception as ex:
raise AirflowException(ex)
return EntryGroup.to_dict(entry_group)
| DataplexCatalogGetEntryGroupOperator |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/sqs.py | {
"start": 980,
"end": 5413
} | class ____(AwsBaseHook):
"""
Interact with Amazon Simple Queue Service.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("sqs") <SQS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "sqs"
super().__init__(*args, **kwargs)
def create_queue(self, queue_name: str, attributes: dict | None = None) -> dict:
"""
Create queue using connection object.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.create_queue`
:param queue_name: name of the queue.
:param attributes: additional attributes for the queue (default: None)
:return: dict with the information about the queue.
"""
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
@staticmethod
def _build_msg_params(
queue_url: str,
message_body: str,
delay_seconds: int = 0,
message_attributes: dict | None = None,
message_group_id: str | None = None,
message_deduplication_id: str | None = None,
) -> dict:
return prune_dict(
{
"QueueUrl": queue_url,
"MessageBody": message_body,
"DelaySeconds": delay_seconds,
"MessageAttributes": message_attributes or {},
"MessageGroupId": message_group_id,
"MessageDeduplicationId": message_deduplication_id,
}
)
def send_message(
self,
queue_url: str,
message_body: str,
delay_seconds: int = 0,
message_attributes: dict | None = None,
message_group_id: str | None = None,
message_deduplication_id: str | None = None,
) -> dict:
"""
Send message to the queue.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.send_message`
:param queue_url: queue url
:param message_body: the contents of the message
:param delay_seconds: seconds to delay the message
:param message_attributes: additional attributes for the message (default: None)
:param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None)
:param message_deduplication_id: This applies only to FIFO (first-in-first-out) queues.
:return: dict with the information about the message sent
"""
params = self._build_msg_params(
queue_url=queue_url,
message_body=message_body,
delay_seconds=delay_seconds,
message_attributes=message_attributes,
message_group_id=message_group_id,
message_deduplication_id=message_deduplication_id,
)
return self.get_conn().send_message(**params)
async def asend_message(
self,
queue_url: str,
message_body: str,
delay_seconds: int = 0,
message_attributes: dict | None = None,
message_group_id: str | None = None,
message_deduplication_id: str | None = None,
) -> dict:
"""
Send message to the queue (async).
.. seealso::
- :external+boto3:py:meth:`SQS.Client.send_message`
:param queue_url: queue url
:param message_body: the contents of the message
:param delay_seconds: seconds to delay the message
:param message_attributes: additional attributes for the message (default: None)
:param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None)
:param message_deduplication_id: This applies only to FIFO (first-in-first-out) queues.
:return: dict with the information about the message sent
"""
params = self._build_msg_params(
queue_url=queue_url,
message_body=message_body,
delay_seconds=delay_seconds,
message_attributes=message_attributes,
message_group_id=message_group_id,
message_deduplication_id=message_deduplication_id,
)
async with await self.get_async_conn() as async_conn:
return await async_conn.send_message(**params)
| SqsHook |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 3117,
"end": 3189
} | class ____:
CREATED = 1
CHANGED = 2
DELETED = 3
| FileChangeType |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 4107,
"end": 4815
} | class ____(_FusedModule):
r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert (
type_before_parametrizations(conv) == Conv1d
and type_before_parametrizations(bn) == BatchNorm1d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(conv)}"
f"{type_before_parametrizations(bn)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(conv, bn, relu)
| ConvBnReLU1d |
python | numba__numba | numba/tests/test_boundscheck.py | {
"start": 3048,
"end": 4064
} | class ____(SerialMixin, TestCase):
@unittest.skipIf(not cuda.is_available(), "NO CUDA")
@TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '1'})
def test_no_cuda_boundscheck(self):
self.assertTrue(config.BOUNDSCHECK)
with self.assertRaises(NotImplementedError):
@cuda.jit(boundscheck=True)
def func():
pass
# Make sure we aren't raising "not supported" error if we aren't
# requesting bounds checking anyway. Related pull request: #5257
@cuda.jit(boundscheck=False)
def func3():
pass
@cuda.jit
def func2(x, a):
a[1] = x[1]
a = np.ones((1,))
x = np.zeros((1,))
# Out of bounds but doesn't raise (it does raise in the simulator,
# so skip there)
if not config.ENABLE_CUDASIM:
func2[1, 1](x, a)
# This is a separate test because the jitted functions that raise exceptions
# have memory leaks.
| TestNoCudaBoundsCheck |
python | google__jax | jax/_src/errors.py | {
"start": 14296,
"end": 18225
} | class ____(ConcretizationTypeError):
"""
This error occurs when a traced value in JAX is used in a context where a
boolean value is expected (see :ref:`faq-different-kinds-of-jax-values`
for more on what a Tracer is).
The boolean cast may be an explicit (e.g. ``bool(x)``) or implicit, through use of
control flow (e.g. ``if x > 0`` or ``while x``), use of Python boolean
operators (e.g. ``z = x and y``, ``z = x or y``, ``z = not x``) or functions
that use them (e.g. ``z = max(x, y)``, ``z = min(x, y)`` etc.).
In some situations, this problem can be easily fixed by marking traced values as
static; in others, it may indicate that your program is doing operations that are
not directly supported by JAX's JIT compilation model.
Examples:
Traced value used in control flow
One case where this often arises is when a traced value is used in
Python control flow. For example::
>>> from jax import jit
>>> import jax.numpy as jnp
>>> @jit
... def func(x, y):
... return x if x.sum() < y.sum() else y
>>> func(jnp.ones(4), jnp.zeros(4)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerBoolConversionError: Attempted boolean conversion of JAX Tracer [...]
We could mark both inputs ``x`` and ``y`` as static, but that would defeat
the purpose of using :func:`jax.jit` here. Another option is to re-express
the if statement in terms of the three-term :func:`jax.numpy.where`::
>>> @jit
... def func(x, y):
... return jnp.where(x.sum() < y.sum(), x, y)
>>> func(jnp.ones(4), jnp.zeros(4))
Array([0., 0., 0., 0.], dtype=float32)
For more complicated control flow including loops, see
:ref:`lax-control-flow`.
Control flow on traced values
Another common cause of this error is if you inadvertently trace over a boolean
flag. For example::
>>> @jit
... def func(x, normalize=True):
... if normalize:
... return x / x.sum()
... return x
>>> func(jnp.arange(5), True) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerBoolConversionError: Attempted boolean conversion of JAX Tracer ...
Here because the flag ``normalize`` is traced, it cannot be used in Python
control flow. In this situation, the best solution is probably to mark this
value as static::
>>> from functools import partial
>>> @partial(jit, static_argnames=['normalize'])
... def func(x, normalize=True):
... if normalize:
... return x / x.sum()
... return x
>>> func(jnp.arange(5), True)
Array([0. , 0.1, 0.2, 0.3, 0.4], dtype=float32)
For more on ``static_argnums``, see the documentation of :func:`jax.jit`.
Using non-JAX aware functions
Another common cause of this error is using non-JAX aware functions within JAX
code. For example:
>>> @jit
... def func(x):
... return min(x, 0)
>>> func(2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerBoolConversionError: Attempted boolean conversion of JAX Tracer ...
In this case, the error occurs because Python's built-in ``min`` function is not
compatible with JAX transforms. This can be fixed by replacing it with
``jnp.minimum``:
>>> @jit
... def func(x):
... return jnp.minimum(x, 0)
>>> print(func(2))
0
To understand more subtleties having to do with tracers vs. regular values,
and concrete vs. abstract values, you may want to read
:ref:`faq-different-kinds-of-jax-values`.
"""
def __init__(self, tracer: core.Tracer):
JAXTypeError.__init__(self,
f"Attempted boolean conversion of {tracer._error_repr()}."
f"{tracer._origin_msg()}")
@export
| TracerBoolConversionError |
python | eth-brownie__brownie | brownie/_gui/tooltip.py | {
"start": 43,
"end": 975
} | class ____(tk.Toplevel):
def __init__(self, widget, text=None, textvariable=None):
super().__init__(widget._root())
label = tk.Label(self, text=text, textvariable=textvariable, font=(None, 10))
label.pack()
self.wm_overrideredirect(True)
self.withdraw()
self.kill = False
self.widget = widget
widget.bind("<Enter>", self.enter)
def enter(self, event):
self.kill = False
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<1>", self.leave)
self.after(1000, self.show)
def show(self):
if self.kill:
return
self.geometry(f"+{self.winfo_pointerx()+5}+{self.winfo_pointery()+5}")
self.lift()
self.deiconify()
def leave(self, event):
self.kill = True
self.widget.unbind("<Leave>")
self.withdraw()
self.widget.bind("<Enter>", self.enter)
| ToolTip |
python | doocs__leetcode | solution/0800-0899/0888.Fair Candy Swap/Solution.py | {
"start": 0,
"end": 280
} | class ____:
def fairCandySwap(self, aliceSizes: List[int], bobSizes: List[int]) -> List[int]:
diff = (sum(aliceSizes) - sum(bobSizes)) >> 1
s = set(bobSizes)
for a in aliceSizes:
if (b := (a - diff)) in s:
return [a, b]
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_database_test.py | {
"start": 732,
"end": 5233
} | class ____(unittest.TestCase):
def testAdd(self):
db = descriptor_database.DescriptorDatabase()
file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
file_desc_proto2 = descriptor_pb2.FileDescriptorProto.FromString(
no_package_pb2.DESCRIPTOR.serialized_pb)
db.Add(file_desc_proto)
db.Add(file_desc_proto2)
self.assertEqual(file_desc_proto, db.FindFileByName(
'google/protobuf/internal/factory_test2.proto'))
# Can find message type.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message'))
# Can find nested message type.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message'))
# Can find enum type.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Enum'))
# Can find nested enum type.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum'))
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.MessageWithNestedEnumOnly.NestedEnum'))
# Can find field.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.list_field'))
# Can find enum value.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Enum.FACTORY_2_VALUE_0'))
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.FACTORY_2_VALUE_0'))
self.assertEqual(
file_desc_proto2, db.FindFileContainingSymbol('NO_PACKAGE_VALUE_0')
)
self.assertEqual(
file_desc_proto2, db.FindFileContainingSymbol('.NO_PACKAGE_VALUE_0')
)
self.assertEqual(
file_desc_proto2, db.FindFileContainingSymbol('NoPackageMessage')
)
self.assertEqual(
file_desc_proto2, db.FindFileContainingSymbol('.NoPackageMessage')
)
self.assertEqual(
file_desc_proto2,
db.FindFileContainingSymbol('NoPackageEnum'),
)
self.assertEqual(
file_desc_proto2,
db.FindFileContainingSymbol('.NoPackageEnum'),
)
# Can find top level extension.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.another_field'))
# Can find nested extension inside a message.
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field'))
# Can find service.
file_desc_proto2 = descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb)
db.Add(file_desc_proto2)
self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol(
'proto2_unittest.TestService'))
# Non-existent field under a valid top level symbol can also be
# found. The behavior is the same with protobuf C++.
self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol(
'proto2_unittest.TestAllTypes.none_field'))
with self.assertRaisesRegex(KeyError, r'\'proto2_unittest\.NoneMessage\''):
db.FindFileContainingSymbol('proto2_unittest.NoneMessage')
with self.assertRaises(KeyError):
db.FindFileContainingSymbol(
'.google.protobuf.python.internal.FACTORY_2_VALUE_0'
)
def testConflictRegister(self):
db = descriptor_database.DescriptorDatabase()
unittest_fd = descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb)
db.Add(unittest_fd)
conflict_fd = descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb)
conflict_fd.name = 'other_file2'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter('always')
db.Add(conflict_fd)
self.assertTrue(len(w))
self.assertIs(w[0].category, RuntimeWarning)
self.assertIn('Conflict register for file "other_file2": ',
str(w[0].message))
self.assertIn(
'already defined in file '
'"google/protobuf/unittest.proto"', str(w[0].message))
if __name__ == '__main__':
unittest.main()
| DescriptorDatabaseTest |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_datetime.py | {
"start": 4721,
"end": 6331
} | class ____:
def test_valid(self) -> None:
prop = bcpd.Time()
assert prop.is_valid(datetime.time(19, 34, 57))
assert prop.is_valid("19:34:57")
def test_invalid(self) -> None:
prop = bcpd.Time()
assert not prop.is_valid(None)
assert not prop.is_valid(datetime.datetime(2020, 1, 11, 19, 34, 57))
assert not prop.is_valid(datetime.date(2020, 1, 11))
assert not prop.is_valid("")
assert not prop.is_valid("02 01 2019")
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
def test_has_ref(self) -> None:
prop = bcpd.Time()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpd.Time()
assert str(prop) == "Time"
# TODO (bev) class Test_TimeDelta(object)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpd, ALL)
| Test_Time |
python | jazzband__django-oauth-toolkit | tests/test_hybrid.py | {
"start": 47729,
"end": 52095
} | class ____(BaseTest):
def test_resource_access_allowed(self):
self.client.login(username="hy_test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.org",
"response_type": "code token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
fragment_dict = parse_qs(urlparse(response["Location"]).fragment)
authorization_code = fragment_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.hy_test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
def test_id_token_resource_access_allowed(self):
self.client.login(username="hy_test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "code token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
fragment_dict = parse_qs(urlparse(response["Location"]).fragment)
authorization_code = fragment_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
id_token = content["id_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.hy_test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
# use id_token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + id_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.hy_test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
# If the resource requires more scopes than we requested, we should get an error
view = ScopedResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
def test_resource_access_deny(self):
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "faketoken",
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.hy_test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RO)
| TestHybridProtectedResource |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk3.py | {
"start": 11899,
"end": 15979
} | class ____(_NavigationToolbar2GTK, Gtk.Toolbar):
def __init__(self, canvas):
GObject.GObject.__init__(self)
self.set_style(Gtk.ToolbarStyle.ICONS)
self._gtk_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert(Gtk.SeparatorToolItem(), -1)
continue
image = Gtk.Image.new_from_gicon(
Gio.Icon.new_for_string(
str(cbook._get_data_path('images',
f'{image_file}-symbolic.svg'))),
Gtk.IconSize.LARGE_TOOLBAR)
self._gtk_ids[text] = button = (
Gtk.ToggleToolButton() if callback in ['zoom', 'pan'] else
Gtk.ToolButton())
button.set_label(text)
button.set_icon_widget(image)
# Save the handler id, so that we can block it as needed.
button._signal_handler = button.connect(
'clicked', getattr(self, callback))
button.set_tooltip_text(tooltip_text)
self.insert(button, -1)
# This filler item ensures the toolbar is always at least two text
# lines high. Otherwise the canvas gets redrawn as the mouse hovers
# over images because those use two-line messages which resize the
# toolbar.
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
label = Gtk.Label()
label.set_markup(
'<small>\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}</small>')
toolitem.set_expand(True) # Push real message to the right.
toolitem.add(label)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
self.message.set_justify(Gtk.Justification.RIGHT)
toolitem.add(self.message)
self.show_all()
_NavigationToolbar2GTK.__init__(self, canvas)
def save_figure(self, *args):
dialog = Gtk.FileChooserDialog(
title="Save the figure",
transient_for=self.canvas.get_toplevel(),
action=Gtk.FileChooserAction.SAVE,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
)
for name, fmts \
in self.canvas.get_supported_filetypes_grouped().items():
ff = Gtk.FileFilter()
ff.set_name(name)
for fmt in fmts:
ff.add_pattern(f'*.{fmt}')
dialog.add_filter(ff)
if self.canvas.get_default_filetype() in fmts:
dialog.set_filter(ff)
@functools.partial(dialog.connect, "notify::filter")
def on_notify_filter(*args):
name = dialog.get_filter().get_name()
fmt = self.canvas.get_supported_filetypes_grouped()[name][0]
dialog.set_current_name(
str(Path(dialog.get_current_name()).with_suffix(f'.{fmt}')))
dialog.set_current_folder(mpl.rcParams["savefig.directory"])
dialog.set_current_name(self.canvas.get_default_filename())
dialog.set_do_overwrite_confirmation(True)
response = dialog.run()
fname = dialog.get_filename()
ff = dialog.get_filter() # Doesn't autoadjust to filename :/
fmt = self.canvas.get_supported_filetypes_grouped()[ff.get_name()][0]
dialog.destroy()
if response != Gtk.ResponseType.OK:
return None
# Save dir for next time, unless empty str (which means use cwd).
if mpl.rcParams['savefig.directory']:
mpl.rcParams['savefig.directory'] = os.path.dirname(fname)
try:
self.canvas.figure.savefig(fname, format=fmt)
return fname
except Exception as e:
dialog = Gtk.MessageDialog(
transient_for=self.canvas.get_toplevel(), text=str(e),
message_type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK)
dialog.run()
dialog.destroy()
| NavigationToolbar2GTK3 |
python | wandb__wandb | wandb/vendor/pygments/lexer.py | {
"start": 10730,
"end": 12557
} | class ____(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
| _This |
python | mkdocs__mkdocs | mkdocs/structure/pages.py | {
"start": 22091,
"end": 22168
} | class ____(enum.IntEnum):
RELATIVE_TO_DOCS = -1
| _AbsoluteLinksValidationValue |
python | pytest-dev__pytest-xdist | src/xdist/workermanage.py | {
"start": 7402,
"end": 9662
} | class ____(execnet.RSync):
"""RSyncer that filters out common files."""
PathLike = Union[str, "os.PathLike[str]"]
def __init__(
self,
sourcedir: PathLike,
*,
ignores: Sequence[PathLike] | None = None,
verbose: bool = True,
) -> None:
if ignores is None:
ignores = []
self._ignores = [re.compile(fnmatch.translate(os.fspath(x))) for x in ignores]
super().__init__(sourcedir=Path(sourcedir), verbose=verbose)
def filter(self, path: PathLike) -> bool:
path = Path(path)
for cre in self._ignores:
if cre.match(path.name) or cre.match(str(path)):
return False
else:
return True
def add_target_host(
self,
gateway: execnet.Gateway,
finished: Callable[[], None] | None = None,
) -> None:
remotepath = os.path.basename(self._sourcedir)
super().add_target(gateway, remotepath, finishedcallback=finished, delete=True)
def _report_send_file(
self,
gateway: execnet.Gateway, # type: ignore[override]
modified_rel_path: str,
) -> None:
if self._verbose > 0:
path = os.path.basename(self._sourcedir) + "/" + modified_rel_path
remotepath = gateway.spec.chdir
print(f"{gateway.spec}:{remotepath} <= {path}")
def make_reltoroot(roots: Sequence[Path], args: list[str]) -> list[str]:
# XXX introduce/use public API for splitting pytest args
splitcode = "::"
result = []
for arg in args:
parts = arg.split(splitcode)
fspath = Path(parts[0])
try:
exists = fspath.exists()
except OSError:
exists = False
if not exists:
result.append(arg)
continue
for root in roots:
x: Path | None
try:
x = fspath.relative_to(root)
except ValueError:
x = None
if x or fspath == root:
parts[0] = root.name + "/" + str(x)
break
else:
raise ValueError(f"arg {arg} not relative to an rsync root")
result.append(splitcode.join(parts))
return result
| HostRSync |
python | pytorch__pytorch | torch/jit/_recursive.py | {
"start": 15545,
"end": 42294
} | class ____:
type_store: dict[type[Module], list[torch._C.ConcreteModuleType]]
methods_compiled: set[torch._C.ConcreteModuleType]
def __init__(self) -> None:
# Python module type => List[ConcreteModuleType)]
self.type_store = {}
# ConcreteTypes that have had their methods already compiled
self.methods_compiled = set()
def get_or_create_concrete_type(self, nn_module):
"""Infer a ConcreteType from this `nn.Module` instance. Underlying JIT types are reused if possible."""
concrete_type_builder = infer_concrete_type_builder(nn_module)
nn_module_type = type(nn_module)
if nn_module_type not in self.type_store:
self.type_store[nn_module_type] = []
# Search the type store for an already-available JIT type
known_types = self.type_store[nn_module_type]
for known_type in known_types:
if known_type.equals(concrete_type_builder):
return known_type
# We didn't find anything; generate a new JIT type from this concrete type
concrete_type = concrete_type_builder.build()
self.type_store[nn_module_type].append(concrete_type)
return concrete_type
concrete_type_store = ConcreteTypeStore()
def create_methods_and_properties_from_stubs(
concrete_type, method_stubs, property_stubs
) -> None:
method_defs = [m.def_ for m in method_stubs]
method_rcbs = [m.resolution_callback for m in method_stubs]
method_defaults = [get_default_args(m.original_method) for m in method_stubs]
property_defs = [p.def_ for p in property_stubs]
property_rcbs = [p.resolution_callback for p in property_stubs]
concrete_type._create_methods_and_properties(
property_defs, property_rcbs, method_defs, method_rcbs, method_defaults
)
def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs) -> None:
hook_defs = [h.def_ for h in hook_stubs]
hook_rcbs = [h.resolution_callback for h in hook_stubs]
pre_hook_defs = [h.def_ for h in pre_hook_stubs]
pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
def get_module_concrete_type(nn_module, share_types=True):
"""
Get a concrete type for nn_modules.
If share_types is True, the concrete type is fetched from concrete_type_store.
If it is False, a new concrete type is created without first searching concrete_type_store.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
share_types = Whether to share underlying JIT types between modules (if possible).
Returns:
A concrete type for nn_module.
"""
assert isinstance(nn_module, Module)
if isinstance(nn_module, torch.jit.ScriptModule) and hasattr(
nn_module, "_concrete_type"
):
return nn_module._concrete_type
if share_types:
# Look into the store of cached JIT types
concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
else:
# Get a concrete type directly, without trying to reuse an existing JIT
# type from the type store.
concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
concrete_type_builder.set_poisoned()
concrete_type = concrete_type_builder.build()
return concrete_type
def create_script_class(obj):
"""
Create and return a RecursiveScriptClass instance from a Python object.
Arguments:
obj: A Python object.
"""
qualified_class_name = _jit_internal._qualified_name(type(obj))
rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))
# Script the type of obj if it hasn't already been scripted.
_compile_and_register_class(type(obj), rcb, qualified_class_name)
class_ty = _python_cu.get_class(qualified_class_name)
# Create an empty torch._C.ScriptObject with the scripted type.
cpp_object = torch._C._create_object_with_type(class_ty)
# Copy all of the attributes over to the torch._C.ScriptObject.
for name, value in obj.__dict__.items():
cpp_object.setattr(name, value)
# Wrap the torch._C.ScriptObject in a RecursiveScriptClass instance.
return wrap_cpp_class(cpp_object)
def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):
"""
Create a new ScriptModule from an nn.Module.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
share_types: Whether to share underlying JIT types between modules (if possible).
NOTE: Only set to False this when we cannot guarantee type sharing will work
correctly. This only happens today for traced modules, where the same
module can produce different traced methods depending on the inputs.
is_tracing: Whether this function is called during tracing or scripting. If tracing,
we don't need to do AttributeTypeIsSupportedChecker because all the unsupported
attributes will be baked as constant in the tracing graph. In addition,
this check significantly slows down the traced modules when the module size is big.
"""
assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
check_module_initialized(nn_module)
concrete_type = get_module_concrete_type(nn_module, share_types)
if not is_tracing:
AttributeTypeIsSupportedChecker().check(nn_module)
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
def create_script_module_impl(nn_module, concrete_type, stubs_fn):
"""
Convert an nn.Module to a RecursiveScriptModule.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
concrete_type: The fully initialized ConcreteType of the module.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
"""
cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
method_stubs = stubs_fn(nn_module)
property_stubs = get_property_stubs(nn_module)
hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
ignored_properties = jit_ignored_properties(nn_module)
def init_fn(script_module) -> None:
# Initialize the ScriptModule:
# 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
for name in concrete_type.get_attributes():
orig_value = getattr(nn_module, name)
orig_value = (
orig_value.value
if isinstance(orig_value, torch.jit.Attribute)
else orig_value
)
cpp_module.setattr(name, orig_value)
# 2. Copy the submodules from the original `nn_module` to the new ScriptModule,
# recursively scripting them.
for name, sub_concrete_type in concrete_type.get_modules():
orig_value = getattr(nn_module, name)
assert isinstance(orig_value, Module), (
f"Expected Module but got {type(orig_value)}"
)
module_type = sub_concrete_type.jit_type
if isinstance(module_type, torch._C.InterfaceType):
# use the interface inference rule to compile the module
scripted = interface_script(module_type, orig_value)
elif isinstance(orig_value, torch.jit.ScriptModule):
scripted = orig_value
else:
# always reuse the provided stubs_fn to infer the methods to compile
scripted = create_script_module_impl(
orig_value, sub_concrete_type, stubs_fn
)
cpp_module.setattr(name, scripted)
script_module._modules[name] = scripted
# 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule.
# This ensures we can access these Python methods on the ScriptModule.
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item):
unbound_function = getattr(nn_module, name).__func__
bound_method = unbound_function.__get__(script_module)
setattr(script_module, name, bound_method)
elif concrete_type.is_ignored_attribute(name):
setattr(script_module, name, item)
# For convenience, attach the concrete type to the new ScriptModule
script_module._concrete_type = concrete_type
# Actually create the ScriptModule, initializing it with the function we just defined
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
# Compile methods if necessary
if concrete_type not in concrete_type_store.methods_compiled:
create_methods_and_properties_from_stubs(
concrete_type, method_stubs, property_stubs
)
# Create hooks after methods to ensure no name collisions between hooks and methods.
# If done before, hooks can overshadow methods that aren't exported.
create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs)
torch._C._run_emit_module_hook(cpp_module)
concrete_type_store.methods_compiled.add(concrete_type)
# Copy the forward hooks and pre-hooks to the new ScriptModule
# to allow the hooks to be run from eager as ScriptFunctions
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
# Special handling so methods like __len__ work in script methods on classes derived from containers
if (
isinstance(
nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)
)
and "__len__" not in cpp_module._method_names()
):
script_module.define(f"def __len__(self):\n return {len(nn_module)}\n")
if (
isinstance(nn_module, torch.nn.ModuleDict)
and "__contains__" not in cpp_module._method_names()
):
if len(nn_module.keys()):
keys = repr(list(nn_module.keys()))
script_module.define(
f"def __contains__(self, key: str):\n return key in {keys}\n"
)
else:
script_module.define("def __contains__(self, key: str):\n return False\n")
# Make the compiled methods available to the Python ScriptModule class.
for method_stub in method_stubs:
if method_stub.original_method is None:
# define()'d methods don't have an Python original_method, so we
# don't need to do any Python re-wrapping stuff
continue
name = method_stub.original_method.__name__
if name != method_stub.def_.name().name:
# TODO: Why skip this? Because @torch.jit._overload_method will
# mangle the name of the function.
continue
script_method = cpp_module._get_method(name)
# Wrap the original to propagate docstrings and such.
# TODO: we don't currently do this functions that are recursively
# compiled, we should.
wrapped_script_method = functools.wraps(method_stub.original_method)(
script_method
)
# Add the methods to the script_module directly. This ensures they will
# be found first when `name` is looked up (as opposed to the stubs or
# nn.Module.forward)
script_module.__dict__[name] = wrapped_script_method
# Make module properties available on the Python ScriptModule class.
for property_stub in property_stubs:
property_name = property_stub.def_.name().name
fget = cpp_module._get_method(property_stub.def_.getter_name().name)
# Setter is optional, so it may not exist.
setter_name = property_stub.def_.setter_name()
fset = cpp_module._get_method(setter_name.name) if setter_name else None
script_module.__dict__[property_name] = property(property_name, fget, fset) # type: ignore[arg-type]
# copy over python methods to script module if they aren't defined on the script module
# this is currently an internal api used only on module containers
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
):
add_python_attr_to_scripted_model(script_module, nn_module, name)
return script_module
# We define shims of certain attributes on the RecursiveScriptModule to support
# magic methods. To check if a script model defines an attribute we need
# to also check that the attribute is not the shim
def script_model_defines_attr(script_model, attr):
script_attr = getattr(script_model, attr, None)
if script_attr is None:
return False
default_attr = getattr(torch.jit.RecursiveScriptModule, attr, None)
if default_attr is None:
return False
return script_attr != default_attr
def add_python_attr_to_scripted_model(script_model, orig, attr) -> None:
if hasattr(orig, attr) and script_model_defines_attr(script_model, attr):
setattr(script_model, attr, getattr(orig, attr))
def get_overload_annotations(mod, jit_ignored_properties):
# original function => [(mangled overload name, overload function)]
overloads = {}
for name in dir(type(mod)):
if name in jit_ignored_properties:
continue
item = getattr(mod, name, None)
if not callable(item):
continue
# builtin functions like repr() in python 2 do not have __module__ defined
if hasattr(item, "__module__") and item.__module__ is not None:
method_overloads = _jit_internal._get_overloaded_methods(
item, mod.__class__
)
if method_overloads is None:
continue
# pyrefly: ignore [missing-attribute]
if item.__func__ in method_overloads:
raise RuntimeError(
_jit_internal.get_overload_no_implementation_error_message(
"method", item.__func__
)
)
names = [name + "__" + str(i) for i in range(len(method_overloads))]
overloads[item] = list(zip(names, method_overloads))
return overloads
def get_overload_name_mapping(overload_info):
# Same format as __overloads__
# original function => [overload names]
overload_name_mappings: dict[str, list[str]] = {}
for orig_fn, overloads in overload_info.items():
original_name = orig_fn.__name__
if original_name not in overload_name_mappings:
overload_name_mappings[original_name] = []
for overload_name, _ in overloads:
overload_name_mappings[original_name].append(overload_name)
return overload_name_mappings
def _check_no_signature(func) -> None:
signature = torch.jit.annotations.get_signature(
func, None, fake_range(), inspect.ismethod(func)
)
if signature is None:
qual_name = _jit_internal._qualified_name(func)
raise RuntimeError(
f"Must explicitly add type annotations to overloaded functions: {qual_name}"
)
def make_stubs_for_overloads(overload_info):
overload_stubs = []
for orig_fn, overloads in overload_info.items():
orig_ast = get_jit_def(
orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule"
)
for overload_name, overload_fn in overloads:
_check_no_signature(overload_fn)
over_ast = get_jit_def(
overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule"
)
new_ast = torch._C._replace_overloaded_method_decl(
over_ast.decl(), orig_ast, overload_name
)
_rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn)
overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn))
return overload_stubs
def check_module_initialized(mod) -> None:
assert isinstance(mod, torch.nn.Module)
if not hasattr(mod, "_parameters"):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has not been initialized, did you forget to call 'super()'?"
)
# This is to avoid importing torch.distributed.nn
if not hasattr(mod, "remote_parameters"):
for name, param in mod._parameters.items():
if param is not None and torch.nn.parameter.is_lazy(param):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has uninitialized parameters {name}. Did you forget to run a forward pass?"
)
for name, buf in mod._buffers.items():
if buf is not None and torch.nn.parameter.is_lazy(buf):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has uninitialized buffers {name}. Did you forget to run a forward pass?"
)
def infer_methods_to_compile(nn_module):
"""Implement the default rules for which methods should act as starting points for compilation.
(TODO add a link when the rules are published).
"""
check_module_initialized(nn_module)
ignored_properties = jit_ignored_properties(nn_module)
methods: list[str] = []
if hasattr(nn_module, "forward") and not _jit_internal.is_ignored_fn(
nn_module.forward
):
forward_func = getattr(nn_module.forward, "__func__", None)
module_forward = getattr(torch.nn.Module, "forward", None)
if forward_func != module_forward:
methods = ["forward"]
exported = []
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.EXPORT
):
exported.append(name)
methods = methods + exported
overload_name_mappings = dict(getattr(nn_module, "__overloads__", {}))
overload_info = get_overload_annotations(nn_module, ignored_properties)
overload_name_mappings.update(get_overload_name_mapping(overload_info))
overload_stubs = make_stubs_for_overloads(overload_info)
nn_module.__overloads__ = overload_name_mappings
# we shouldn't directly compile overloaded methods, just its overloads
def ignore_overloaded(method_name):
return method_name not in overload_name_mappings
filtered_methods = filter(ignore_overloaded, methods)
# Unique the methods. We don't want to use a set to store the methods because it
# introduces non-determinism to compile order.
uniquer: set[str] = set()
uniqued_methods = []
for name in filtered_methods:
if name in uniquer:
continue
uniqued_methods.append(name)
uniquer.add(name)
stubs = [make_stub_from_method(nn_module, method) for method in uniqued_methods]
return overload_stubs + stubs
def get_hook_stubs(nn_module):
"""Return forward hook and pre_hook ScriptModuleStubs."""
check_module_initialized(nn_module)
hook_map: dict = {}
hook_stubs = []
for hook in nn_module._forward_hooks.values():
if hook.__name__ in hook_map:
if id(hook) != id(hook_map[hook.__name__]):
raise RuntimeError(
f"Hook '{hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[hook.__name__] = hook
hook_stubs.append(make_stub(hook, hook.__name__))
pre_hook_stubs = []
for pre_hook in nn_module._forward_pre_hooks.values():
if pre_hook.__name__ in hook_map:
if id(pre_hook) != id(hook_map[pre_hook.__name__]):
raise RuntimeError(
f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[pre_hook.__name__] = pre_hook
pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
return hook_stubs, pre_hook_stubs
def get_property_stubs(nn_module):
"""Create property stubs for the properties of the module by creating method stubs for the getter and setter."""
module_ty = type(nn_module)
properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
rcbs = {}
for name in dir(module_ty):
item = getattr(module_ty, name, None)
if isinstance(item, property):
if not item.fget:
raise RuntimeError(
f"Property {name} of {nn_module.__name__} must have a getter"
)
rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
return stubs
def interface_script(mod_interface, nn_module):
"""
Make a ScriptModule from an nn.Module, using the interface methods rule for determining which methods to compile.
Args:
mod_interface: the interface type that the module have
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
"""
if isinstance(nn_module, torch.jit.ScriptModule):
return nn_module
check_module_initialized(nn_module)
def infer_interface_methods_to_compile(nn_module):
"""Rule to infer the methods from the interface type.
It is used to know which methods need to act as starting points for compilation.
"""
stubs = [
make_stub_from_method(nn_module, method)
for method in mod_interface.getMethodNames()
]
return stubs
return create_script_module(nn_module, infer_interface_methods_to_compile)
def try_compile_fn(fn, loc):
if _jit_internal.is_ignored_fn(fn):
# Don't do anything for @ignore'd functions
return None
if isinstance(fn, torch.nn.Module):
# Since modules are callable pybind recognizes them as functions, but
# don't do anything for them
return None
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise RuntimeError(
f"`{fn}` is not a function. Recursive scripting only supports "
"Python functions or methods currently.\n"
f"Consider manually annotating `{fn}` with @torch.jit.script."
)
# The object returned by __prepare_scriptable__ might have a different closure.
# Resolve it here to get the right resolution callback.
fn = fn.__prepare_scriptable__() if hasattr(fn, "__prepare_scriptable__") else fn # type: ignore[operator]
# We don't have the actual scope where the function was defined, but we can
# extract the necessary info from the closed over variables on the function
# object
rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
return torch.jit.script(fn, _rcb=rcb)
def wrap_cpp_class(cpp_class):
"""Wrap this torch._C.Object in a Python RecursiveScriptClass."""
return torch.jit.RecursiveScriptClass(cpp_class)
def wrap_cpp_module(cpp_module):
"""Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules."""
def init_fn(script_module) -> None:
for name, cpp_module in torch._C.ModuleDict(script_module._c).items():
setattr(script_module, name, wrap_cpp_module(cpp_module))
script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
script_module._c._type()
)
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
def compile_unbound_method(concrete_type, fn):
if _jit_internal.is_ignored_fn(fn):
return None
stub = make_stub(fn, fn.__name__)
with torch._jit_internal._disable_emit_hooks():
# We don't want to call the hooks here since the graph that is calling
# this function is not yet complete
create_methods_and_properties_from_stubs(concrete_type, (stub,), ())
return stub
def lazy_bind(concrete_type, unbound_method):
"""
Return a function that lazily binds `unbound_method` to a provided Module IValue, then invokes the method.
We do this so that any Python shenanigans that
will poison type sharing are impossible at compile time.
"""
def lazy_binding_method(cpp_module, *args):
def init_fn(script_module) -> None:
orig_class = concrete_type.py_class
# Copy @ignored/@unused methods from the original module to the new one.
# This ensures they are available during execution.
for name in dir(orig_class):
item = getattr(orig_class, name, None)
if _jit_internal.is_ignored_fn(item):
setattr(script_module, name, item)
# Copy constants over so they are available during execution.
for name, value in concrete_type.get_constants().items():
setattr(script_module, name, value)
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
method = types.MethodType(unbound_method, script_module)
return method(*args)
# make the lazy binding method "look like" the original method
lazy_binding_method.original_fn = unbound_method # type: ignore[attr-defined]
lazy_binding_method.__name__ = unbound_method.__name__
torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)
return lazy_binding_method
| ConcreteTypeStore |
python | apache__airflow | airflow-core/src/airflow/task/priority_strategy.py | {
"start": 3687,
"end": 5906
} | class ____(PriorityWeightStrategy):
"""Priority weight strategy that uses the sum of the priority weights of all upstream tasks."""
def get_weight(self, ti: TaskInstance):
if TYPE_CHECKING:
assert ti.task
dag = ti.task.get_dag()
if dag is None:
return ti.task.priority_weight
return ti.task.priority_weight + sum(
dag.task_dict[task_id].priority_weight for task_id in ti.task.get_flat_relative_ids(upstream=True)
)
airflow_priority_weight_strategies: dict[str, type[PriorityWeightStrategy]] = {
WeightRule.ABSOLUTE: _AbsolutePriorityWeightStrategy,
WeightRule.DOWNSTREAM: _DownstreamPriorityWeightStrategy,
WeightRule.UPSTREAM: _UpstreamPriorityWeightStrategy,
}
airflow_priority_weight_strategies_classes = {
cls: name for name, cls in airflow_priority_weight_strategies.items()
}
def validate_and_load_priority_weight_strategy(
priority_weight_strategy: str | PriorityWeightStrategy | None,
) -> PriorityWeightStrategy:
"""
Validate and load a priority weight strategy.
Returns the priority weight strategy if it is valid, otherwise raises an exception.
:param priority_weight_strategy: The priority weight strategy to validate and load.
:meta private:
"""
from airflow.serialization.serialized_objects import _get_registered_priority_weight_strategy
from airflow.utils.module_loading import qualname
if priority_weight_strategy is None:
return _AbsolutePriorityWeightStrategy()
if isinstance(priority_weight_strategy, str):
if priority_weight_strategy in airflow_priority_weight_strategies:
return airflow_priority_weight_strategies[priority_weight_strategy]()
priority_weight_strategy_class = priority_weight_strategy
else:
priority_weight_strategy_class = qualname(priority_weight_strategy)
loaded_priority_weight_strategy = _get_registered_priority_weight_strategy(priority_weight_strategy_class)
if loaded_priority_weight_strategy is None:
raise ValueError(f"Unknown priority strategy {priority_weight_strategy_class}")
return loaded_priority_weight_strategy()
| _UpstreamPriorityWeightStrategy |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_github_username.py | {
"start": 1898,
"end": 3953
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid github users."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_users": ["github", "git", "great-expectations"],
"invalid_users": [
"RANDOM_123",
"memememememe",
"crave_for_more_memes",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_users"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_users"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_github_username"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "expectation", "validation"],
"contributors": [
"@rdodev",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidGithubUsername().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidGithubUsername |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py | {
"start": 6413,
"end": 6655
} | class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI PipelineJobList link."""
name = "Pipeline Job List"
key = "pipeline_job_list_conf"
format_str = VERTEX_AI_PIPELINE_JOB_LIST_LINK
| VertexAIPipelineJobListLink |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 77867,
"end": 77962
} | class ____(spack.error.SpackError):
"""Superclass for repository-related errors."""
| RepoError |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/non_existing_conditional_dep/package.py | {
"start": 216,
"end": 451
} | class ____(Package):
"""Simple package with no source and one dependency"""
homepage = "http://www.example.com"
version("2.0")
version("1.0")
depends_on("dep-with-variants@999", when="@2.0")
| NonExistingConditionalDep |
python | pytorch__pytorch | torch/_inductor/cudagraph_utils.py | {
"start": 966,
"end": 1460
} | class ____:
"""
A serializable version of torch.fx.Node that contains information
pertinent to placeholder stack traces. We use these in logging and error messages
related to cudagraphs, and will cache these results.
"""
name: str
stack_trace: Optional[str]
# This field is recursive, but never cyclic (since a node never uses itself)
users: list[PlaceholderInfo]
mutating_use_stack_trace: Optional[str]
@dataclasses.dataclass(frozen=True)
| PlaceholderInfo |
python | coleifer__peewee | playhouse/sqlite_udf.py | {
"start": 7609,
"end": 8138
} | class ____(_datetime_heap_agg):
def finalize(self):
dtp = min_diff = None
while self.heap:
if min_diff is None:
if dtp is None:
dtp = heapq.heappop(self.heap)
continue
dt = heapq.heappop(self.heap)
diff = dt - dtp
if min_diff is None or min_diff > diff:
min_diff = diff
dtp = dt
if min_diff is not None:
return total_seconds(min_diff)
@aggregate(DATE)
| mintdiff |
python | wandb__wandb | tools/perf/scripts/bench_run_log.py | {
"start": 326,
"end": 759
} | class ____:
"""A simple timer class to measure execution time."""
def __init__(self):
self.start_time = None
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def start(self):
self.start_time = datetime.now()
def stop(self):
return round((datetime.now() - self.start_time).total_seconds(), 2)
| Timer |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 79918,
"end": 84172
} | class ____(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.surface)
subplot_kws = {"projection": "3d"}
@pytest.mark.xfail(
reason=(
"Failing inside matplotlib. Should probably be fixed upstream because "
"other plot functions can handle it. "
"Remove this test when it works, already in Common2dMixin"
)
)
def test_dates_are_concise(self) -> None:
import matplotlib.dates as mdates
time = pd.date_range("2000-01-01", "2000-01-10")
a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)])
self.plotfunc(a, x="t")
ax = plt.gca()
assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator)
assert isinstance(ax.xaxis.get_major_formatter(), mdates.ConciseDateFormatter)
def test_primitive_artist_returned(self) -> None:
artist = self.plotmethod()
assert isinstance(artist, mpl_toolkits.mplot3d.art3d.Poly3DCollection)
@pytest.mark.slow
def test_2d_coord_names(self) -> None:
self.plotmethod(x="x2d", y="y2d")
# make sure labels came out ok
ax = plt.gca()
assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D)
assert "x2d" == ax.get_xlabel()
assert "y2d" == ax.get_ylabel()
assert f"{self.darray.long_name} [{self.darray.units}]" == ax.get_zlabel()
def test_xyincrease_false_changes_axes(self) -> None:
# Does not make sense for surface plots
pytest.skip("does not make sense for surface plots")
def test_xyincrease_true_changes_axes(self) -> None:
# Does not make sense for surface plots
pytest.skip("does not make sense for surface plots")
def test_can_pass_in_axis(self) -> None:
self.pass_in_axis(self.plotmethod, subplot_kw={"projection": "3d"})
def test_default_cmap(self) -> None:
# Does not make sense for surface plots with default arguments
pytest.skip("does not make sense for surface plots")
def test_diverging_color_limits(self) -> None:
# Does not make sense for surface plots with default arguments
pytest.skip("does not make sense for surface plots")
def test_colorbar_kwargs(self) -> None:
# Does not make sense for surface plots with default arguments
pytest.skip("does not make sense for surface plots")
def test_cmap_and_color_both(self) -> None:
# Does not make sense for surface plots with default arguments
pytest.skip("does not make sense for surface plots")
def test_seaborn_palette_as_cmap(self) -> None:
# seaborn does not work with mpl_toolkits.mplot3d
with pytest.raises(ValueError):
super().test_seaborn_palette_as_cmap()
# Need to modify this test for surface(), because all subplots should have labels,
# not just left and bottom
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid(self) -> None:
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2) # type: ignore[arg-type] # https://github.com/python/mypy/issues/15015
assert_array_equal(g.axs.shape, [2, 2])
for (_y, _x), ax in np.ndenumerate(g.axs):
assert ax.has_data()
assert "y" == ax.get_ylabel()
assert "x" == ax.get_xlabel()
# Inferring labels
g = self.plotfunc(d, col="z", col_wrap=2) # type: ignore[arg-type] # https://github.com/python/mypy/issues/15015
assert_array_equal(g.axs.shape, [2, 2])
for (_y, _x), ax in np.ndenumerate(g.axs):
assert ax.has_data()
assert "y" == ax.get_ylabel()
assert "x" == ax.get_xlabel()
def test_viridis_cmap(self) -> None:
return super().test_viridis_cmap()
def test_can_change_default_cmap(self) -> None:
return super().test_can_change_default_cmap()
def test_colorbar_default_label(self) -> None:
return super().test_colorbar_default_label()
def test_facetgrid_map_only_appends_mappables(self) -> None:
return super().test_facetgrid_map_only_appends_mappables()
| TestSurface |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 31638,
"end": 35569
} | class ____(BuiltinFunctionT):
_id = "as_wei_value"
_inputs = [("value", (IntegerT.any(), DecimalT())), ("unit", StringT.any())]
_return_type = UINT256_T
wei_denoms = {
("wei",): 1,
("femtoether", "kwei", "babbage"): 10**3,
("picoether", "mwei", "lovelace"): 10**6,
("nanoether", "gwei", "shannon"): 10**9,
("microether", "szabo"): 10**12,
("milliether", "finney"): 10**15,
("ether",): 10**18,
("kether", "grand"): 10**21,
}
def get_denomination(self, node):
value = node.args[1].get_folded_value()
if not isinstance(value, vy_ast.Str):
raise ArgumentException(
"Wei denomination must be given as a literal string", node.args[1]
)
try:
denom = next(v for k, v in self.wei_denoms.items() if value.value in k)
except StopIteration:
raise ArgumentException(f"Unknown denomination: {value.value}", node.args[1]) from None
return denom
def _try_fold(self, node):
validate_call_args(node, 2)
denom = self.get_denomination(node)
value = node.args[0].get_folded_value()
if not isinstance(value, (vy_ast.Decimal, vy_ast.Int)):
raise UnfoldableNode
value = value.value
if value < 0:
raise InvalidLiteral("Negative wei value not allowed", node.args[0])
return vy_ast.Int.from_node(node, value=int(value * denom))
def fetch_call_return(self, node):
self.infer_arg_types(node)
return self._return_type
def infer_arg_types(self, node, expected_return_typ=None):
self._validate_arg_types(node)
# return a concrete type instead of abstract type
value_type = get_possible_types_from_node(node.args[0]).pop()
unit_type = get_possible_types_from_node(node.args[1]).pop()
return [value_type, unit_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
value = args[0]
denom_divisor = self.get_denomination(expr)
with value.cache_when_complex("value") as (b1, value):
if value.typ in IntegerT.unsigneds():
product = IRnode.from_list(["mul", value, denom_divisor])
with product.cache_when_complex("ans") as (b2, product):
irlist = ["seq"]
ok = ["or", ["eq", ["div", product, value], denom_divisor], ["iszero", value]]
irlist.append(["assert", ok])
irlist.append(product)
sub = b2.resolve(irlist)
elif value.typ in IntegerT.signeds():
product = IRnode.from_list(["mul", value, denom_divisor])
with product.cache_when_complex("ans") as (b2, product):
irlist = ["seq"]
positive = ["sge", value, 0]
safemul = [
"or",
["eq", ["div", product, value], denom_divisor],
["iszero", value],
]
ok = ["and", positive, safemul]
irlist.append(["assert", ok])
irlist.append(product)
sub = b2.resolve(irlist)
elif value.typ == DecimalT():
# sanity check (so we don't have to use safemul)
assert (SizeLimits.MAXDECIMAL * denom_divisor) < 2**256 - 1
sub = [
"seq",
["assert", ["sge", value, 0]],
["div", ["mul", value, denom_divisor], DECIMAL_DIVISOR],
]
else:
raise CompilerPanic(f"Unexpected type: {value.typ}")
return IRnode.from_list(b1.resolve(sub), typ=UINT256_T)
zero_value = IRnode.from_list(0, typ=UINT256_T)
empty_value = IRnode.from_list(0, typ=BYTES32_T)
| AsWeiValue |
python | getsentry__sentry | tests/sentry/integrations/api/serializers/test_external_actor.py | {
"start": 425,
"end": 7397
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
self.integration, self.org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"installation_type": "born_as_bot",
},
)
def test_user(self) -> None:
external_actor, _ = ExternalActor.objects.get_or_create(
user_id=self.user.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="Marcos",
external_id="Gaeta",
)
result = serialize(external_actor, self.user, key="user")
assert "actorId" not in result
assert result["id"] == str(external_actor.id)
assert result["externalName"] == "Marcos"
assert result["externalId"] == "Gaeta"
assert result["userId"] == str(self.user.id)
def test_team(self) -> None:
team = self.create_team(organization=self.organization, members=[self.user])
external_actor, _ = ExternalActor.objects.get_or_create(
team_id=team.id,
organization=self.organization,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="Marcos",
external_id="Gaeta",
)
result = serialize(external_actor, self.user, key="team")
assert "actorId" not in result
assert result["id"] == str(external_actor.id)
assert result["externalName"] == "Marcos"
assert result["externalId"] == "Gaeta"
assert result["teamId"] == str(team.id)
def test_strict_external_user_name(self) -> None:
# Ensure user names must start with @
external_actor_user_data = {
"provider": get_provider_name(ExternalProviders.GITHUB.value),
"externalName": "raz",
"integrationId": self.integration.id,
"userId": self.user.id,
}
serializer = ExternalUserSerializer(
data=external_actor_user_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is False
assert "externalName" in serializer.errors
# Ensure longer user names are limited in length
external_actor_user_data["externalName"] = "@" + ("razputin_aquato" * 20)
serializer = ExternalUserSerializer(
data=external_actor_user_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is False
assert "externalName" in serializer.errors
# Ensure proper user names are valid
external_actor_user_data["externalName"] = "@raz"
serializer = ExternalUserSerializer(
data=external_actor_user_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is True
def test_strict_external_team_name(self) -> None:
team = self.create_team(organization=self.organization, members=[self.user])
# Ensure team names must start with @
external_actor_team_data = {
"provider": get_provider_name(ExternalProviders.GITHUB.value),
"externalName": "the-psychic-six",
"integrationId": self.integration.id,
"team_id": team.id,
}
serializer = ExternalTeamSerializer(
data=external_actor_team_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is False
assert "externalName" in serializer.errors
# Ensure longer team names are limited in length
external_actor_team_data["externalName"] = "@" + ("the-psychic-six" * 20)
serializer = ExternalTeamSerializer(
data=external_actor_team_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is False
assert "externalName" in serializer.errors
# Ensure proper team names are valid
external_actor_team_data["externalName"] = "@the-psychic-six"
serializer = ExternalTeamSerializer(
data=external_actor_team_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is True
def test_avoid_strict_external_name(self) -> None:
# Strict rules should only run for strict providers
provider = get_provider_name(ExternalProviders.SLACK.value)
assert provider not in STRICT_NAME_PROVIDERS
external_actor_user_data = {
"provider": get_provider_name(ExternalProviders.SLACK.value),
"externalName": "ford-cruller",
"integrationId": self.integration.id,
"userId": self.user.id,
}
serializer = ExternalUserSerializer(
data=external_actor_user_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is True
def test_create_case_insensitive_team(self) -> None:
sentry_team = self.create_team(organization=self.organization, members=[self.user])
external_actor_team_data = {
"provider": get_provider_name(ExternalProviders.GITHUB.value),
"external_name": "@getsentry/example-team",
"integrationId": self.integration.id,
"team_id": sentry_team.id,
}
serializer = ExternalTeamSerializer(
data=external_actor_team_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is True
external_actor1, created1 = serializer.create(serializer.validated_data)
assert created1 is True
assert external_actor1.external_name == "@getsentry/example-team"
# Try to create another with different case but different team - should match existing one
external_actor_team_data["external_name"] = "@GETSENTRY/EXAMPLE-TEAM"
external_actor_team_data["team_id"] = sentry_team.id
serializer = ExternalTeamSerializer(
data=external_actor_team_data,
context={"organization": self.organization},
)
assert serializer.is_valid() is True
external_actor2, created2 = serializer.create(serializer.validated_data)
# We should not have created a new external actor - we should have returned the existing one
assert created2 is False
assert external_actor2.id == external_actor1.id
assert external_actor2.external_name == "@getsentry/example-team"
| ExternalActorSerializerTest |
python | kamyu104__LeetCode-Solutions | Python/ways-to-split-array-into-good-subarrays.py | {
"start": 45,
"end": 470
} | class ____(object):
def numberOfGoodSubarraySplits(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
result, prev = 1, -1
for i in xrange(len(nums)):
if nums[i] != 1:
continue
if prev != -1:
result = (result*(i-prev))%MOD
prev = i
return result if prev != -1 else 0
| Solution |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/tag/options.py | {
"start": 1478,
"end": 3916
} | class ____(BaseConfigOption[Set[Tag]]):
"""
Setting for a set of tags.
This setting describes a set of tags, and is used to validate the actual
tags as defined in the front matter of pages, as well as for filters that
are used to include or exclude pages from a listing and to check if a tag
is allowed to be used.
"""
def __init__(self, *, allowed: set[Tag] = set()):
"""
Initialize the setting.
Arguments:
allowed: The tags allowed to be used.
"""
super().__init__()
self.allowed = allowed
# -------------------------------------------------------------------------
allowed: set[Tag]
"""
The tags allowed to be used.
"""
# -------------------------------------------------------------------------
def run_validation(self, value: object) -> set[Tag]:
"""
Validate list of tags.
If the value is `None`, an empty set is returned. Otherwise, the value
is expected to be a list of tags, which is converted to a set of tags.
This means that tags are automatically deduplicated. Note that tags are
not expanded here, as the set is intended to be checked exactly.
Arguments:
value: The value to validate.
Returns:
A set of tags.
"""
if value is None:
return set()
# Ensure tags are iterable
if not isinstance(value, Iterable) or isinstance(value, str):
raise ValidationError(
f"Expected iterable tags, but received: {value}"
)
# Ensure tags are valid
tags: set[Tag] = set()
for index, tag in enumerate(value):
if not isinstance(tag, (str, int, float, bool)):
raise ValidationError(
f"Expected a {str}, {int}, {float} or {bool} "
f"but received: {type(tag)} at index {index}"
)
# Coerce tag to string and add to set
tags.add(Tag(str(tag)))
# Ensure tags are in allow list, if any
if self.allowed:
invalid = tags.difference(self.allowed)
if invalid:
raise ValidationError(
"Tags not in allow list: " +
",".join([tag.name for tag in invalid])
)
# Return set of tags
return tags
| TagSet |
python | joke2k__faker | faker/providers/person/it_IT/__init__.py | {
"start": 44,
"end": 32681
} | class ____(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats = formats_male + formats_female
# source: https://en.wikipedia.org/w/index.php?title=Category:Italian_masculine_given_names
first_names_male = (
"Achille",
"Adamo",
"Adelmo",
"Adriano",
"Agnolo",
"Agostino",
"Alberico",
"Alberto",
"Alderano",
"Aldo",
"Alessandro",
"Alessio",
"Alfio",
"Alfredo",
"Alphons",
"Amadeo",
"Amedeo",
"Amico",
"Amleto",
"Angelo",
"Annibale",
"Ansaldo",
"Antonello",
"Antonino",
"Antonio",
"Armando",
"Arnaldo",
"Arnulfo",
"Arsenio",
"Arturo",
"Atenulf",
"Augusto",
"Azeglio",
"Baccio",
"Baldassare",
"Bartolomeo",
"Benedetto",
"Benito",
"Benvenuto",
"Beppe",
"Bernardo",
"Biagio",
"Bruno",
"Calcedonio",
"Calogero",
"Camillo",
"Carlo",
"Carmelo",
"Cesare",
"Cipriano",
"Cirillo",
"Ciro",
"Claudio",
"Coluccio",
"Coriolano",
"Corrado",
"Costantino",
"Costanzo",
"Damiano",
"Daniele",
"Danilo",
"Dante",
"Dario",
"Delfino",
"Dino",
"Dionigi",
"Domenico",
"Donatello",
"Donato",
"Durante",
"Edoardo",
"Elladio",
"Elmo",
"Emilio",
"Ennio",
"Enrico",
"Enzio",
"Enzo",
"Eraldo",
"Ermanno",
"Ermenegildo",
"Ermes",
"Ernesto",
"Ettore",
"Ezio",
"Fabio",
"Fabrizio",
"Fausto",
"Fedele",
"Federico",
"Federigo",
"Ferdinando",
"Filippo",
"Fiorenzo",
"Fiorino",
"Flavio",
"Francesco",
"Franco",
"Fredo",
"Fulvio",
"Gabriele",
"Gaetano",
"Galasso",
"Gaspare",
"Gastone",
"Geronimo",
"Giacinto",
"Giacobbe",
"Giacomo",
"Giampaolo",
"Giampiero",
"Gian",
"Giancarlo",
"Gianfrancesco",
"Gianfranco",
"Gianluca",
"Gianluigi",
"Gianmarco",
"Gianni",
"Gianpaolo",
"Gianpietro",
"Gilberto",
"Gino",
"Gioacchino",
"Gioachino",
"Gioele",
"Gioffre",
"Gionata",
"Giorgio",
"Giosuè",
"Giovanni",
"Girolamo",
"Giuliano",
"Giulio",
"Giuseppe",
"Giustino",
"Goffredo",
"Graziano",
"Greco",
"Guarino",
"Guglielmo",
"Guido",
"Gustavo",
"Hugo",
"Ignazio",
"Ippazio",
"Ivan",
"Ivo",
"Jacopo",
"Lamberto",
"Lando",
"Laureano",
"Lazzaro",
"Leonardo",
"Leone",
"Leopoldo",
"Liberto",
"Livio",
"Lodovico",
"Lorenzo",
"Luca",
"Luchino",
"Luciano",
"Lucio",
"Ludovico",
"Luigi",
"Manuel",
"Marcantonio",
"Marcello",
"Marco",
"Mariano",
"Mario",
"Martino",
"Martino",
"Massimiliano",
"Massimo",
"Matteo",
"Mattia",
"Maurilio",
"Maurizio",
"Mauro",
"Michelangelo",
"Michele",
"Micheletto",
"Michelotto",
"Milo",
"Mirco",
"Mirko",
"Nanni",
"Napoleone",
"Niccolò",
"Nico",
"Nicola",
"Nicolò",
"Nino",
"Orazio",
"Oreste",
"Orlando",
"Osvaldo",
"Ottavio",
"Ottone",
"Panfilo",
"Paolo",
"Paride",
"Pasqual",
"Pasquale",
"Patrizio",
"Pellegrino",
"Pier",
"Pierangelo",
"Piergiorgio",
"Piergiuseppe",
"Pierluigi",
"Piermaria",
"Piero",
"Pierpaolo",
"Piersanti",
"Pietro",
"Pompeo",
"Pomponio",
"Puccio",
"Raffaele",
"Raffaellino",
"Raffaello",
"Raimondo",
"Ranieri",
"Rembrandt",
"Renzo",
"Riccardo",
"Ricciotti",
"Roberto",
"Rocco",
"Rodolfo",
"Rolando",
"Roman",
"Romeo",
"Romolo",
"Ronaldo",
"Rosario",
"Ruggero",
"Ruggiero",
"Sabatino",
"Salvatore",
"Salvi",
"Sandro",
"Sante",
"Santino",
"Saverio",
"Sebastiano",
"Sergius",
"Severino",
"Silvestro",
"Silvio",
"Simone",
"Stefano",
"Telemaco",
"Temistocle",
"Tiziano",
"Toni",
"Tonino",
"Torquato",
"Tullio",
"Ubaldo",
"Uberto",
"Ugo",
"Ugolino",
"Umberto",
"Valerio",
"Venancio",
"Vincentio",
"Vincenzo",
"Virgilio",
"Vito",
"Vittorio",
)
# source: https://en.wikipedia.org/wiki/Category:Italian_feminine_given_names
first_names_female = (
"Adelasia",
"Adele",
"Adriana",
"Alessandra",
"Alessia",
"Alina",
"Allegra",
"Amalia",
"Amanda",
"Angelica",
"Angelina",
"Anita",
"Annalisa",
"Annamaria",
"Annetta",
"Annunziata",
"Antonella",
"Antonia",
"Antonietta",
"Antonina",
"Aria",
"Aurora",
"Barbara",
"Beatrice",
"Berenice",
"Bettina",
"Bianca",
"Bianca",
"Camilla",
"Carla",
"Carolina",
"Cassandra",
"Caterina",
"Cecilia",
"Chiara",
"Claudia",
"Clelia",
"Concetta",
"Cristina",
"Daria",
"Diana",
"Dina",
"Dolores",
"Donatella",
"Donna",
"Eleanora",
"Elena",
"Eliana",
"Elisa",
"Elvira",
"Emma",
"Erika",
"Etta",
"Eugenia",
"Eva",
"Evangelista",
"Fabia",
"Fabrizia",
"Federica",
"Fernanda",
"Fiamma",
"Filippa",
"Flavia",
"Flora",
"Fortunata",
"Francesca",
"Gabriella",
"Gelsomina",
"Gemma",
"Germana",
"Giada",
"Gianna",
"Giorgia",
"Giovanna",
"Giulia",
"Giuliana",
"Giulietta",
"Giuseppina",
"Gloria",
"Graziella",
"Greca",
"Griselda",
"Ida",
"Ilaria",
"Imelda",
"Iolanda",
"Irma",
"Isa",
"Isabella",
"Jolanda",
"Lara",
"Laura",
"Lauretta",
"Letizia",
"Liana",
"Licia",
"Lidia",
"Liliana",
"Lilla",
"Lina",
"Lisa",
"Livia",
"Lolita",
"Loredana",
"Loretta",
"Lucia",
"Luciana",
"Lucrezia",
"Ludovica",
"Luigina",
"Luisa",
"Marcella",
"Margherita",
"Maria",
"Maria",
"Maria",
"Mariana",
"Marina",
"Marisa",
"Marissa",
"Marta",
"Martina",
"Matilda",
"Maura",
"Melania",
"Melina",
"Melissa",
"Mercedes",
"Michela",
"Milena",
"Monica",
"Morena",
"Nadia",
"Natalia",
"Nedda",
"Nicoletta",
"Nina",
"Ninetta",
"Olga",
"Ornella",
"Paloma",
"Paola",
"Paoletta",
"Patrizia",
"Paulina",
"Pierina",
"Pina",
"Priscilla",
"Raffaella",
"Ramona",
"Renata",
"Rita",
"Roberta",
"Romana",
"Romina",
"Rosa",
"Rosalia",
"Rosaria",
"Rosina",
"Rossana",
"Sandra",
"Serafina",
"Serena",
"Silvia",
"Simonetta",
"Sole",
"Sonia",
"Sophia",
"Stefani",
"Stefania",
"Stella",
"Susanna",
"Sylvia",
"Tatiana",
"Teresa",
"Tina",
"Tiziana",
"Tonia",
"Valentina",
"Valeria",
"Vanessa",
"Veronica",
"Victoria",
"Vincenza",
"Virginia",
"Viridiana",
"Vittoria",
"Zaira",
)
first_names = first_names_male + first_names_female
# source: https://en.wiktionary.org/w/index.php?title=Category:Italian_surnames
last_names = (
"Abatantuono",
"Abate",
"Abba",
"Abbagnale",
"Accardo",
"Acerbi",
"Adinolfi",
"Agazzi",
"Agnesi",
"Agostinelli",
"Agostini",
"Ajello",
"Albertini",
"Alboni",
"Aldobrandi",
"Alfieri",
"Alfonsi",
"Alighieri",
"Almagià",
"Aloisio",
"Alonzi",
"Altera",
"Amaldi",
"Amato",
"Ammaniti",
"Anastasi",
"Andreotti",
"Andreozzi",
"Angeli",
"Angiolello",
"Anguillara",
"Anguissola",
"Anichini",
"Antelami",
"Antonacci",
"Antonelli",
"Antonello",
"Antonetti",
"Antonini",
"Antonioni",
"Antonucci",
"Aporti",
"Argan",
"Argentero",
"Argenti",
"Argento",
"Argurio",
"Ariasso",
"Ariosto",
"Armani",
"Armellini",
"Asmundo",
"Asprucci",
"Aulenti",
"Avogadro",
"Babati",
"Babato",
"Babbo",
"Bacosi",
"Badoer",
"Badoglio",
"Baggio",
"Baglioni",
"Bajamonti",
"Bajardi",
"Balbi",
"Balbo",
"Balla",
"Balotelli",
"Bandello",
"Baracca",
"Barbarigo",
"Barberini",
"Barcaccia",
"Barcella",
"Barese",
"Baresi",
"Barillaro",
"Baroffio",
"Barozzi",
"Barracco",
"Barsanti",
"Bartoli",
"Barzini",
"Basadonna",
"Bassi",
"Basso",
"Bataglia",
"Battaglia",
"Battelli",
"Battisti",
"Bazzi",
"Beccaria",
"Beccheria",
"Beffa",
"Belletini",
"Bellini",
"Bellocchio",
"Bellucci",
"Bellò",
"Bembo",
"Benedetti",
"Benigni",
"Benussi",
"Berengario",
"Bergoglio",
"Berlusconi",
"Bernardi",
"Bernardini",
"Bernetti",
"Bernini",
"Berrè",
"Bersani",
"Bertoli",
"Bertolucci",
"Bertoni",
"Bettin",
"Bettoni",
"Bevilacqua",
"Biagi",
"Biagiotti",
"Bianchi",
"Bianchini",
"Bignami",
"Bignardi",
"Binaghi",
"Bixio",
"Blasi",
"Boaga",
"Bocca",
"Boccaccio",
"Boccherini",
"Boccioni",
"Bocelli",
"Bodoni",
"Boezio",
"Boiardo",
"Boitani",
"Boito",
"Boldù",
"Bombieri",
"Bompiani",
"Bonanno",
"Bonatti",
"Bonaventura",
"Bondumier",
"Bongiorno",
"Bonino",
"Bonolis",
"Bonomo",
"Borghese",
"Borgia",
"Borrani",
"Borromeo",
"Borromini",
"Borroni",
"Borsellino",
"Borsiere",
"Borzomì",
"Bosio",
"Bossi",
"Bosurgi",
"Botta",
"Bottaro",
"Botticelli",
"Bottigliero",
"Bova",
"Bragadin",
"Bragaglia",
"Bramante",
"Brambilla",
"Brancaccio",
"Branciforte",
"Brenna",
"Bresciani",
"Briccialdi",
"Brichese",
"Broggini",
"Broschi",
"Brugnaro",
"Brunelleschi",
"Brunello",
"Bruno",
"Bruscantini",
"Bulzoni",
"Buonauro",
"Burcardo",
"Buscetta",
"Busoni",
"Cabibbo",
"Caboto",
"Cabrini",
"Caccianemico",
"Caccioppoli",
"Cadorna",
"Caetani",
"Cafarchia",
"Caffarelli",
"Cagnin",
"Cagnotto",
"Cainero",
"Caironi",
"Calarco",
"Calbo",
"Calgari",
"Callegari",
"Callegaro",
"Calvo",
"Camanni",
"Camicione",
"Camilleri",
"Camiscione",
"Cammarata",
"Campanella",
"Campano",
"Campise",
"Camuccini",
"Canali",
"Canetta",
"Canevascini",
"Canil",
"Cannizzaro",
"Canova",
"Cantimori",
"Capecchi",
"Capone",
"Cappelli",
"Capuana",
"Caracciolo",
"Cardano",
"Carducci",
"Carfagna",
"Carli",
"Carnera",
"Carocci",
"Carosone",
"Carpaccio",
"Carriera",
"Carullo",
"Caruso",
"Casadei",
"Casagrande",
"Casale",
"Casaleggio",
"Casalodi",
"Casarin",
"Casellati",
"Casini",
"Cassarà",
"Castelli",
"Castellitto",
"Castiglione",
"Castioni",
"Catalano",
"Catenazzi",
"Cattaneo",
"Cavalcanti",
"Cavanna",
"Ceci",
"Celentano",
"Cendron",
"Ceravolo",
"Ceri",
"Cerquiglini",
"Cerutti",
"Cesaroni",
"Cesarotti",
"Ceschi",
"Chechi",
"Cheda",
"Cherubini",
"Chiappetta",
"Chiaramonte",
"Chiesa",
"Chigi",
"Chindamo",
"Chinnici",
"Chittolini",
"Ciampi",
"Cianciolo",
"Ciani",
"Cibin",
"Cicala",
"Cicilia",
"Cignaroli",
"Cilea",
"Cilibrasi",
"Cimarosa",
"Cimini",
"Cipolla",
"Civaschi",
"Coardi",
"Cocci",
"Cociarelli",
"Colletti",
"Collina",
"Collodi",
"Columbo",
"Combi",
"Comboni",
"Comencini",
"Comeriato",
"Comisso",
"Comolli",
"Condoleo",
"Contarini",
"Conte",
"Conti",
"Contrafatto",
"Coppola",
"Corbo",
"Corcos",
"Corradi",
"Correr",
"Cortese",
"Cossiga",
"Costalonga",
"Costanzi",
"Cremonesi",
"Crespi",
"Crisafulli",
"Crispi",
"Cristoforetti",
"Cuda",
"Cugia",
"Cundari",
"Cuomo",
"Curatoli",
"Curci",
"Curiel",
"Cusano",
"Cutrufo",
"Cutuli",
"Cuzzocrea",
"Dalla",
"Dallapé",
"Dallara",
"Dandolo",
"Deledda",
"Delle",
"Dellucci",
"Depero",
"Desio",
"Detti",
"Dibiasi",
"Disdero",
"Doglioni",
"Donarelli",
"Donati",
"Donatoni",
"Donini",
"Donà",
"Doria",
"Dossetti",
"Dossi",
"Dovara",
"Draghi",
"Druso",
"Dulbecco",
"Duodo",
"Durante",
"Duse",
"Eco",
"Einaudi",
"Emanuelli",
"Emo",
"Endrizzi",
"Errani",
"Errigo",
"Esposito",
"Fabbri",
"Fabrizi",
"Faggiani",
"Fagiani",
"Fagotto",
"Falcone",
"Falier",
"Fallaci",
"Falloppio",
"Fantini",
"Fantoni",
"Fantozzi",
"Fanucci",
"Faranda",
"Farina",
"Farinelli",
"Farnese",
"Fattori",
"Faugno",
"Favata",
"Federici",
"Federico",
"Fermi",
"Ferrabosco",
"Ferragamo",
"Ferragni",
"Ferrante",
"Ferrara",
"Ferrari",
"Ferraris",
"Ferrata",
"Ferrazzi",
"Ferretti",
"Ferrucci",
"Fibonacci",
"Ficino",
"Fieramosca",
"Filangieri",
"Filippelli",
"Filippini",
"Filogamo",
"Filzi",
"Finetti",
"Finotto",
"Finzi",
"Fioravanti",
"Fiorucci",
"Fischetti",
"Fittipaldi",
"Flaiano",
"Florio",
"Fo",
"Foa",
"Foconi",
"Fogazzaro",
"Foletti",
"Folliero",
"Fornaciari",
"Forza",
"Foscari",
"Foà",
"Fracci",
"Franceschi",
"Franscini",
"Franzese",
"Frescobaldi",
"Fusani",
"Fuseli",
"Gabba",
"Gabbana",
"Gabrieli",
"Gadda",
"Gaggini",
"Gagliano",
"Gagliardi",
"Gaiatto",
"Gaito",
"Galeati",
"Galiazzo",
"Galilei",
"Galtarossa",
"Galuppi",
"Galvani",
"Gangemi",
"Gargallo",
"Garibaldi",
"Garobbio",
"Garozzo",
"Garrone",
"Garzoni",
"Gasperi",
"Gatto",
"Gelli",
"Gemito",
"Gentileschi",
"Gentili",
"Gentilini",
"Geraci",
"Germano",
"Giacconi",
"Giacometti",
"Giammusso",
"Gianetti",
"Gianinazzi",
"Giannelli",
"Giannetti",
"Giannini",
"Giannone",
"Giannotti",
"Giannuzzi",
"Gianvecchio",
"Gibilisco",
"Gigli",
"Gilardoni",
"Ginese",
"Ginesio",
"Gioberti",
"Giolitti",
"Giorgetti",
"Giovine",
"Giradello",
"Giulietti",
"Giunti",
"Giusti",
"Goldoni",
"Goldstein",
"Golgi",
"Golino",
"Gonzaga",
"Gori",
"Gottardi",
"Gotti",
"Govoni",
"Gozzano",
"Gozzi",
"Gradenigo",
"Gramsci",
"Granatelli",
"Grassi",
"Grasso",
"Gravina",
"Greco",
"Greggio",
"Gregori",
"Gregorio",
"Gremese",
"Grifeo",
"Grimani",
"Grisoni",
"Gritti",
"Grossi",
"Gualandi",
"Gualtieri",
"Guarana",
"Guarato",
"Guariento",
"Guarneri",
"Gucci",
"Guglielmi",
"Guicciardini",
"Guidone",
"Guidotti",
"Guinizzelli",
"Gullotta",
"Gulotta",
"Gussoni",
"Iacobucci",
"Iacovelli",
"Iadanza",
"Iannelli",
"Iannotti",
"Iannucci",
"Iannuzzi",
"Impastato",
"Infantino",
"Innocenti",
"Interiano",
"Interminei",
"Interminelli",
"Inzaghi",
"Ioppi",
"Jacuzzi",
"Jilani",
"Jovinelli",
"Juvara",
"Lamborghini",
"Lancisi",
"Lanfranchi",
"Lattuada",
"Leblanc",
"Legnante",
"Leonardi",
"Leoncavallo",
"Leone",
"Leonetti",
"Leopardi",
"Lercari",
"Lerner",
"Letta",
"Lettiere",
"Ligorio",
"Liguori",
"Lippomano",
"Littizzetto",
"Liverotti",
"Lollobrigida",
"Lombardi",
"Lombardo",
"Lombroso",
"Longhena",
"Lopresti",
"Loredan",
"Lovato",
"Lucarelli",
"Lucchesi",
"Lucciano",
"Luciani",
"Ludovisi",
"Luna",
"Lupo",
"Luria",
"Lussu",
"Luxardo",
"Luzi",
"Maccanelli",
"Maderna",
"Maderno",
"Maffei",
"Maggioli",
"Maglio",
"Magnani",
"Magrassi",
"Majewski",
"Majorana",
"Malacarne",
"Malaparte",
"Malatesta",
"Malenchini",
"Malipiero",
"Malpighi",
"Manacorda",
"Mancini",
"Mannoia",
"Manolesso",
"Mantegazza",
"Mantegna",
"Manunta",
"Manzoni",
"Marangoni",
"Marazzi",
"Marcacci",
"Marconi",
"Marenzio",
"Marinetti",
"Marini",
"Marino",
"Marrone",
"Marsili",
"Martinelli",
"Martucci",
"Marzorati",
"Mascagni",
"Mascheroni",
"Maspero",
"Mastandrea",
"Mastroianni",
"Mattarella",
"Matteotti",
"Mazzacurati",
"Mazzanti",
"Mazzeo",
"Mazzi",
"Mazzini",
"Mazzocchi",
"Medici",
"Mengolo",
"Mennea",
"Mercadante",
"Mercalli",
"Mercantini",
"Mercati",
"Merisi",
"Metella",
"Meucci",
"Mezzetta",
"Micca",
"Michelangeli",
"Micheletti",
"Migliaccio",
"Milanesi",
"Mimun",
"Miniati",
"Missoni",
"Moccia",
"Mocenigo",
"Modiano",
"Modigliani",
"Modugno",
"Mogherini",
"Molesini",
"Monaco",
"Mondadori",
"Mondaini",
"Monduzzi",
"Moneta",
"Monicelli",
"Montalcini",
"Montalti",
"Montanari",
"Montanariello",
"Montanelli",
"Monte",
"Montecchi",
"Montesano",
"Montessori",
"Monteverdi",
"Monti",
"Morabito",
"Morandi",
"Morandini",
"Morellato",
"Moresi",
"Moretti",
"Morgagni",
"Morlacchi",
"Morosini",
"Morpurgo",
"Morricone",
"Morrocco",
"Mortati",
"Morucci",
"Moschino",
"Mozart",
"Munari",
"Muratori",
"Murialdo",
"Murri",
"Musatti",
"Muti",
"Naccari",
"Nadi",
"Napolitano",
"Natta",
"Navarria",
"Navone",
"Necci",
"Nibali",
"Nicoletti",
"Nicolini",
"Nicolucci",
"Nievo",
"Niggli",
"Niscoromni",
"Nitti",
"Nitto",
"Nolcini",
"Nonis",
"Norbiato",
"Nordio",
"Nosiglia",
"Notarbartolo",
"Novaro",
"Nugnes",
"Odescalchi",
"Offredi",
"Oliboni",
"Olivetti",
"Omma",
"Onio",
"Onisto",
"Opizzi",
"Orengo",
"Orlando",
"Orsini",
"Ortese",
"Ortolani",
"Oscuro",
"Ossani",
"Ossola",
"Ostinelli",
"Ottino",
"Ovadia",
"Pace",
"Pacelli",
"Pacetti",
"Pacillo",
"Pacomio",
"Padovano",
"Paganini",
"Pagliaro",
"Pagnotto",
"Palazzo",
"Palladio",
"Palmisano",
"Palombi",
"Paltrinieri",
"Palumbo",
"Panatta",
"Panicucci",
"Panzera",
"Paoletti",
"Paolini",
"Paolucci",
"Papafava",
"Papetti",
"Pareto",
"Parini",
"Parisi",
"Parmitano",
"Parpinel",
"Parri",
"Paruta",
"Pascarella",
"Pasolini",
"Pasqua",
"Passalacqua",
"Pastine",
"Pausini",
"Pavanello",
"Pavarotti",
"Pavone",
"Peano",
"Pederiva",
"Pedersoli",
"Pedrazzini",
"Pedroni",
"Pellegrini",
"Pelli",
"Pellico",
"Pennetta",
"Pepe",
"Peranda",
"Pergolesi",
"Perini",
"Perozzo",
"Persico",
"Pertile",
"Pertini",
"Peruzzi",
"Petralli",
"Petrassi",
"Petrocelli",
"Petrucci",
"Petrucelli",
"Petruzzi",
"Pezzali",
"Piacentini",
"Piane",
"Piazzi",
"Piccinni",
"Piccio",
"Pietrangeli",
"Pigafetta",
"Pignatti",
"Pinamonte",
"Pincherle",
"Pininfarina",
"Piovani",
"Pirandello",
"Pirelli",
"Pisacane",
"Pisani",
"Pisano",
"Pisaroni",
"Pistoletto",
"Pizzamano",
"Pizzetti",
"Pizziol",
"Pizzo",
"Platini",
"Poerio",
"Polani",
"Polesel",
"Polizzi",
"Pometta",
"Pontecorvo",
"Ponti",
"Porcellato",
"Porzio",
"Pozzecco",
"Prada",
"Praga",
"Pratesi",
"Prati",
"Priuli",
"Procacci",
"Prodi",
"Proietti",
"Pucci",
"Puccini",
"Pugliese",
"Puglisi",
"Pulci",
"Quasimodo",
"Querini",
"Raimondi",
"Ramazzotti",
"Randazzo",
"Rapisardi",
"Rastelli",
"Raurica",
"Ravaglioli",
"Redi",
"Regge",
"Renault",
"Renier",
"Rensi",
"Renzi",
"Respighi",
"Riccardi",
"Riccati",
"Ricci",
"Ricciardi",
"Ricolfi",
"Rienzo",
"Righi",
"Rinaldi",
"Rismondo",
"Ritacca",
"Rizzo",
"Rizzoli",
"Rocca",
"Roccabonella",
"Roero",
"Romagnoli",
"Romano",
"Romiti",
"Roncalli",
"Rosiello",
"Rosmini",
"Rosselli",
"Rossellini",
"Rossetti",
"Rossi",
"Rossini",
"Roth",
"Rubbia",
"Ruberto",
"Ruffini",
"Ruggeri",
"Ruggieri",
"Russo",
"Rusticucci",
"Sabatini",
"Sabbatini",
"Saffi",
"Sagese",
"Sagnelli",
"Sagredo",
"Salandra",
"Salata",
"Salgari",
"Salieri",
"Salvemini",
"Salvini",
"Salvo",
"Samele",
"Sandi",
"Sanguineti",
"Sansoni",
"Santi",
"Santorio",
"Santoro",
"Sanudo",
"Saraceno",
"Saracino",
"Saragat",
"Satriani",
"Satta",
"Sauli",
"Sauro",
"Savorgnan",
"Sbarbaro",
"Scaduto",
"Scalera",
"Scalfaro",
"Scamarcio",
"Scandone",
"Scaramucci",
"Scarfoglio",
"Scarlatti",
"Scarpa",
"Scarpetta",
"Scarponi",
"Schiaparelli",
"Schiavo",
"Schiavone",
"Schicchi",
"Scialpi",
"Scotti",
"Scotto",
"Seddio",
"Segni",
"Segrè",
"Semitecolo",
"Serao",
"Serlupi",
"Sermonti",
"Serraglio",
"Sforza",
"Sgalambro",
"Sgarbi",
"Sibilia",
"Siffredi",
"Silvestri",
"Simeoni",
"Sinisi",
"Sismondi",
"Smirnoff",
"Sobrero",
"Soderini",
"Soffici",
"Sokolov",
"Solari",
"Solimena",
"Sollima",
"Sommaruga",
"Sonnino",
"Soprano",
"Soranzo",
"Sordi",
"Sorrentino",
"Spadafora",
"Spallanzani",
"Spanevello",
"Speri",
"Spinelli",
"Spinola",
"Squarcione",
"Sraffa",
"Staglieno",
"Stefanelli",
"Stein",
"Stoppani",
"Storladi",
"Stradivari",
"Strangio",
"Stucchi",
"Surian",
"Tacchini",
"Taccola",
"Tafuri",
"Tagliafierro",
"Taliani",
"Taliercio",
"Tamborini",
"Tamburello",
"Tamburi",
"Tamburini",
"Tanzini",
"Tarantini",
"Tarantino",
"Tarchetti",
"Tartaglia",
"Tartini",
"Tasca",
"Tasso",
"Tassoni",
"Tebaldi",
"Tedesco",
"Telesio",
"Tencalla",
"Terragni",
"Tiepolo",
"Tirabassi",
"Togliatti",
"Tognazzi",
"Toldo",
"Tolentino",
"Tomaselli",
"Tomasetti",
"Tomasini",
"Tomei",
"Tommaseo",
"Toninelli",
"Tonisto",
"Torlonia",
"Tornatore",
"Torricelli",
"Toscani",
"Toscanini",
"Toselli",
"Tosi",
"Toso",
"Tosto",
"Totino",
"Tozzi",
"Tozzo",
"Traetta",
"Trapanese",
"Trapani",
"Travaglia",
"Travaglio",
"Traversa",
"Travia",
"Trebbi",
"Treccani",
"Tremonti",
"Trentin",
"Trentini",
"Tresoldi",
"Treves",
"Trevisan",
"Trevisani",
"Trezzini",
"Trillini",
"Trincavelli",
"Trobbiani",
"Troisi",
"Trombetta",
"Tron",
"Tropea",
"Trotta",
"Trupiano",
"Trussardi",
"Turati",
"Turchetta",
"Turchi",
"Turci",
"Turrini",
"Tutino",
"Tuzzolino",
"Ubaldi",
"Udinese",
"Udinesi",
"Ughi",
"Ungaretti",
"Valentino",
"Valguarnera",
"Valier",
"Valmarana",
"Vanvitelli",
"Varano",
"Vasari",
"Vattimo",
"Vecellio",
"Vecoli",
"Veltroni",
"Vendetti",
"Venditti",
"Veneziano",
"Venier",
"Vento",
"Venturi",
"Vercelloni",
"Verdi",
"Verdone",
"Verga",
"Vergassola",
"Vergerio",
"Verri",
"Versace",
"Vespa",
"Vespucci",
"Vezzali",
"Vianello",
"Vidoni",
"Vigliotti",
"Vigorelli",
"Villadicani",
"Villarosa",
"Viola",
"Virgilio",
"Visconti",
"Visintini",
"Vismara",
"Vittadello",
"Vitturi",
"Vivaldi",
"Viviani",
"Volta",
"Volterra",
"Zabarella",
"Zaccagnini",
"Zaccardo",
"Zacchia",
"Zacco",
"Zaguri",
"Zamengo",
"Zamorani",
"Zampa",
"Zanazzo",
"Zanichelli",
"Zanzi",
"Zarlino",
"Zecchini",
"Zeffirelli",
"Zetticci",
"Ziani",
"Zichichi",
"Zito",
"Zola",
"Zoppetti",
"Zoppetto",
)
prefixes_female = ("Dott.", "Sig.ra")
prefixes_male = ("Dott.", "Sig.")
prefixes = ("Dott.", "Sig.", "Sig.ra")
| Provider |
python | numba__numba | numba/core/types/iterators.py | {
"start": 1029,
"end": 1771
} | class ____(SimpleIteratorType):
"""
Type class for Numba-compiled generator objects.
"""
def __init__(self, gen_func, yield_type, arg_types, state_types,
has_finalizer):
self.gen_func = gen_func
self.arg_types = tuple(arg_types)
self.state_types = tuple(state_types)
self.has_finalizer = has_finalizer
name = "%s generator(func=%s, args=%s, has_finalizer=%s)" % (
yield_type, self.gen_func, self.arg_types,
self.has_finalizer)
super(Generator, self).__init__(name, yield_type)
@property
def key(self):
return (self.gen_func, self.arg_types, self.yield_type,
self.has_finalizer, self.state_types)
| Generator |
python | cython__cython | tests/run/test_templatelib.py | {
"start": 631,
"end": 3364
} | class ____:
def assertInterpolationEqual(self, i, exp):
"""Test Interpolation equality.
The *i* argument must be an Interpolation instance.
The *exp* argument must be a tuple of the form
(value, expression, conversion, format_spec) where the final three
items may be omitted and are assumed to be '', None and '' respectively.
"""
if len(exp) == 4:
actual = (i.value, i.expression, i.conversion, i.format_spec)
self.assertEqual(actual, exp)
elif len(exp) == 3:
self.assertEqual((i.value, i.expression, i.conversion), exp)
self.assertEqual(i.format_spec, "")
elif len(exp) == 2:
self.assertEqual((i.value, i.expression), exp)
self.assertEqual(i.conversion, None)
self.assertEqual(i.format_spec, "")
elif len(exp) == 1:
self.assertEqual((i.value,), exp)
self.assertEqual(i.expression, "")
self.assertEqual(i.conversion, None)
self.assertEqual(i.format_spec, "")
def assertTStringEqual(self, t, strings, interpolations):
"""Test template string literal equality.
The *strings* argument must be a tuple of strings equal to *t.strings*.
The *interpolations* argument must be a sequence of tuples which are
compared against *t.interpolations*. Each tuple must match the form
described in the `assertInterpolationEqual` method.
"""
self.assertEqual(t.strings, strings)
self.assertEqual(len(t.interpolations), len(interpolations))
if len(t.interpolations) != len(interpolations):
# Handle Python <3.10 which doesn't have strict in zip
raise ValueError(f"Lengths differ {len(t.interpolations)} {len(interpolations)}")
for i, exp in zip(t.interpolations, interpolations):
self.assertInterpolationEqual(i, exp)
def _convert(value, conversion):
if conversion == "a":
return ascii(value)
elif conversion == "r":
return repr(value)
elif conversion == "s":
return str(value)
return value
def fstring(template):
parts = []
for item in template:
# adapted from match/case since we don't yet support it
if isinstance(item, str):
parts.append(item)
elif isinstance(item, Interpolation):
value = item.value
conversion = item.conversion
format_spec = item.format_spec
value = _convert(value, conversion)
value = format(value, format_spec)
parts.append(value)
return "".join(parts)
############ from test_templatelib.py ###############
| TStringBaseCase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/impl.py | {
"start": 10433,
"end": 14795
} | class ____(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per each thread, never moving a connection to a
thread other than the one which it was created in.
.. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
on arbitrary connections that exist beyond the size setting of
``pool_size``, e.g. if more unique **thread identities**
than what ``pool_size`` states are used. This cleanup is
non-deterministic and not sensitive to whether or not the connections
linked to those thread identities are currently in use.
:class:`.SingletonThreadPool` may be improved in a future release,
however in its current status it is generally used only for test
scenarios using a SQLite ``:memory:`` database and is not recommended
for production use.
The :class:`.SingletonThreadPool` class **is not compatible** with asyncio
and :func:`_asyncio.create_async_engine`.
Options are the same as those of :class:`_pool.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
_is_asyncio = False
def __init__(
self,
creator: Union[_CreatorFnType, _CreatorWRecFnType],
pool_size: int = 5,
**kw: Any,
):
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._fairy = threading.local()
self._all_conns: Set[ConnectionPoolEntry] = set()
self.size = pool_size
def recreate(self) -> SingletonThreadPool:
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
pre_ping=self._pre_ping,
logging_name=self._orig_logging_name,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _transfer_from(
self, other_singleton_pool: SingletonThreadPool
) -> None:
# used by the test suite to make a new engine / pool without
# losing the state of an existing SQLite :memory: connection
assert not hasattr(other_singleton_pool._fairy, "current")
self._conn = other_singleton_pool._conn
self._all_conns = other_singleton_pool._all_conns
def dispose(self) -> None:
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self) -> None:
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self) -> str:
return "SingletonThreadPool id:%d size: %d" % (
id(self),
len(self._all_conns),
)
def _do_return_conn(self, record: ConnectionPoolEntry) -> None:
try:
del self._fairy.current
except AttributeError:
pass
def _do_get(self) -> ConnectionPoolEntry:
try:
if TYPE_CHECKING:
c = cast(ConnectionPoolEntry, self._conn.current())
else:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
def connect(self) -> PoolProxiedConnection:
# vendored from Pool to include the now removed use_threadlocal
# behavior
try:
rec = cast(_ConnectionFairy, self._fairy.current())
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._fairy)
| SingletonThreadPool |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 3490,
"end": 8985
} | class ____(ParentClass):
# This should generate an error because the type of 'a' doesn't match.
def my_method1(self, a: str):
return 1
# This should generate an error because it's missing a param named 'b'.
def my_method2(self, a: int):
return 1
# This should generate an error because the name doesn't match.
def my_method3(self, a: int, c: int):
return 1
# This should generate an error because the param category for 'b'
# doesn't match.
def my_method4(self, a: int, **b: int):
return 1
def my_method5(self, a: int, _c: int):
return 1
def my_method6(self, not_a: int, /, b: int):
return 1
# This should generate an error because c is not a position-only parameter.
def my_method7(self, a: int, /, c: int):
return 1
# This should generate an error because it contains too many parameters.
def my_method8(self, a: int, b: int, c: int, d: str = ""):
return 1
def my_method9(self, a: int, b: int, c: int = 4):
return 1
def my_method10(self, a: int, b: int, *args):
return 1
def my_method11(self, a: int, b: int, *, c: str = "", **kwargs):
return 1
# This should generate an error because the type of 'a' is
# narrower than the original method.
def my_method12(self, a: int) -> int:
return 1
def my_method13(self, a: int | str) -> int:
return 1
# This should generate an error because the return type is
# wider than in the original method.
def my_method14(self, a: int) -> int | str:
return 1
# This should generate an error because we're overriding a
# method with a variable.
my_method15: int = 3
# This should generate an error because we're overriding a
# method with a class.
class my_method16:
pass
def my_method17(self, *args: object, **kwargs: object) -> None: ...
def my_method18(self, a: str, *args: object, **kwargs: object) -> None: ...
# This should generate an error because b param doesn't match a in name.
def my_method19(self, b: str, *args: object, **kwargs: object) -> None: ...
@classmethod
def my_method20(cls: type[T_ChildClass], a: str) -> T_ChildClass: ...
# This should generate an error.
@decorator
def my_method21(self, var: int) -> None: ...
# This should generate an error.
def _protected_method1(self):
return 1
def __private_method1(self):
return 1
# This should generate an error.
def my_method22(self, a: str, b: int, c: float, d: bool, /) -> None: ...
# This should generate an error because a is missing a default value.
def my_method23(self, a: str) -> None: ...
def my_method24(self, a: str = "") -> None: ...
# This should generate an error because a is missing a default value.
def my_method25(self, *, a: str) -> None: ...
def my_method26(self, *, a: str = "") -> None: ...
def my_method27(self, __a: object) -> None: ...
def my_method28(self, a: object, /) -> None: ...
# This should generate an error because it is not a classmethod.
def my_method29(self, /) -> None: ...
# This should generate an error because it is not a classmethod.
@staticmethod
def my_method30(a: type[ParentClass], /) -> None: ...
# This should generate an error because it is not a staticmethod.
@classmethod
def my_method31(cls, /) -> None: ...
# This should generate an error because it is not a staticmethod.
def my_method32(self, /) -> None: ...
# This should generate an error because it is not an instance method.
@classmethod
def my_method33(cls, /) -> None: ...
# This should generate an error because it is not an instance method.
@staticmethod
def my_method34(a: type[ParentClass], /) -> None: ...
def my_method35(self, **kwargs: int) -> None: ...
# This should generate an error because the method in the parent
# class has a keyword-only parameter that is type 'int', and this
# isn't compatible with 'str'.
def my_method36(self, **kwargs: str) -> None: ...
def my_method37(self, *args: Any) -> None: ...
# This should generate an error because the number of position-only
# parameters doesn't match.
def my_method38(self, **kwargs: Any) -> None: ...
def my_method39(self, *args: Any) -> None: ...
# This should generate an error because the number of position-only
# parameters doesn't match.
def my_method40(self, **kwargs: Any) -> None: ...
# This should generate an error because keyword parameters "a"
# and "b" are missing.
def my_method41(self, a: int, *args: str) -> None: ...
# This should generate an error because args doesn't have the right type.
def my_method42(self, a: int, *args: int) -> None: ...
def my_method43(self, a: int, b: str, c: str) -> None: ...
# This should generate an error because kwargs is missing.
def my_method44(self, *object) -> None: ...
def my_method45(self, i: int, /) -> None: ...
def __my_method46__(self, y: int) -> None: ...
# This should generate an error because of a type mismatch.
def __my_method47__(self, y: str) -> None: ...
# This should generate an error because the keyword-only parameter "x: int"
# is not compatible with the base method's "**kwargs: object".
def my_method48(self, /, *, x: int = 3, **kwargs: object) -> None: ...
| ChildClass |
python | chroma-core__chroma | chromadb/quota/__init__.py | {
"start": 632,
"end": 1817
} | class ____(Component):
"""
Exposes hooks to enforce quotas.
"""
def __init__(self, system: System) -> None:
super().__init__(system)
@abstractmethod
def set_context(self, context: Dict[str, Any]) -> None:
"""
Sets the context for a given request.
"""
pass
@abstractmethod
def enforce(
self,
action: Action,
tenant: str,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
embeddings: Optional[Embeddings] = None,
uris: Optional[URIs] = None,
ids: Optional[IDs] = None,
name: Optional[str] = None,
new_name: Optional[str] = None,
metadata: Optional[CollectionMetadata] = None,
new_metadata: Optional[CollectionMetadata] = None,
limit: Optional[int] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
n_results: Optional[int] = None,
query_embeddings: Optional[Embeddings] = None,
collection_id: Optional[UUID] = None,
) -> None:
"""
Enforces a quota.
"""
pass
| QuotaEnforcer |
python | getsentry__sentry | src/sentry/rules/filters/issue_category.py | {
"start": 582,
"end": 2009
} | class ____(EventFilter):
id = "sentry.rules.filters.issue_category.IssueCategoryFilter"
form_fields = {"value": {"type": "choice", "choices": list(CATEGORY_CHOICES.items())}}
rule_type = "filter/event"
label = "The issue's category is equal to {value}"
prompt = "The issue's category is ..."
def _passes(self, group: Group) -> bool:
try:
value: GroupCategory = GroupCategory(int(self.get_option("value")))
except (TypeError, ValueError):
return False
if group:
return value == group.issue_category or value == group.issue_category_v2
return False
def passes(self, event: GroupEvent, state: EventState, **kwargs: Any) -> bool:
return self._passes(event.group)
def passes_activity(
self, condition_activity: ConditionActivity, event_map: dict[str, Any]
) -> bool:
try:
group = Group.objects.get_from_cache(id=condition_activity.group_id)
except Group.DoesNotExist:
return False
return self._passes(group)
def render_label(self) -> str:
value = self.data["value"]
title = CATEGORY_CHOICES.get(value)
group_category_name = title.title() if title else ""
return self.label.format(value=group_category_name)
def get_form_instance(self) -> IssueCategoryForm:
return IssueCategoryForm(self.data)
| IssueCategoryFilter |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/TC004_10.py | {
"start": 141,
"end": 185
} | class ____:
x: List
def f():
x: Dict
| C |
python | apache__airflow | providers/weaviate/src/airflow/providers/weaviate/hooks/weaviate.py | {
"start": 2970,
"end": 44413
} | class ____(BaseHook):
"""
Interact with Weaviate database to store vectors. This hook uses the 'conn_id'.
:param conn_id: The connection id to use when connecting to Weaviate. <howto/connection:weaviate>
"""
conn_name_attr = "conn_id"
default_conn_name = "weaviate_default"
conn_type = "weaviate"
hook_name = "Weaviate"
def __init__(
self,
conn_id: str = default_conn_name,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.conn_id = conn_id
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, PasswordField, StringField
return {
"http_secure": BooleanField(lazy_gettext("Use https"), default=False),
"token": PasswordField(lazy_gettext("Weaviate API Key"), widget=BS3PasswordFieldWidget()),
"grpc_host": StringField(lazy_gettext("gRPC host"), widget=BS3TextFieldWidget()),
"grpc_port": StringField(lazy_gettext("gRPC port"), widget=BS3TextFieldWidget()),
"grpc_secure": BooleanField(
lazy_gettext("Use a secure channel for the underlying gRPC API"), default=False
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
    """Return custom field behaviour: hide the schema field, relabel login/password."""
    relabeling = {
        "login": "OIDC Username",
        "password": "OIDC Password",
    }
    return {"hidden_fields": ["schema"], "relabeling": relabeling}
def get_conn(self) -> WeaviateClient:
    """Build a ``WeaviateClient`` from the Airflow connection's host/port/extras."""
    conn = self.get_connection(self.conn_id)
    extras = conn.extra_dejson
    # pop() strips transport settings from extras so only the remaining keys
    # (e.g. additional_headers below) are forwarded.
    http_secure = extras.pop("http_secure", False)
    grpc_secure = extras.pop("grpc_secure", False)
    return weaviate.connect_to_custom(
        http_host=conn.host,  # type: ignore[arg-type]
        # Default to the standard port for the chosen scheme.
        http_port=conn.port or (443 if http_secure else 80),
        http_secure=http_secure,
        # gRPC endpoint falls back to the HTTP host when not set explicitly.
        grpc_host=extras.pop("grpc_host", conn.host),
        grpc_port=extras.pop("grpc_port", 443 if grpc_secure else 80),
        grpc_secure=grpc_secure,
        headers=extras.pop("additional_headers", {}),
        auth_credentials=self._extract_auth_credentials(conn),  # type: ignore[arg-type]
    )
def _extract_auth_credentials(self, conn: Connection) -> AuthCredentials:
    """Derive Weaviate auth credentials from the connection's extras.

    Precedence: API key > OIDC bearer token > client credentials > resource-owner
    password flow (connection login/password).
    """
    extras = conn.extra_dejson

    # "token" is the legacy name for the API key; keep accepting it.
    key = extras.get("api_key", None) or extras.get("token", None)
    if key:
        return Auth.api_key(api_key=key)

    bearer = extras.get("access_token", None)
    if bearer:
        return Auth.bearer_token(
            access_token=bearer,
            expires_in=extras.get("expires_in", 60),
            refresh_token=extras.get("refresh_token", None),
        )

    oidc_scope = extras.get("scope", None) or extras.get("oidc_scope", None)
    secret = extras.get("client_secret", None)
    if secret:
        return Auth.client_credentials(client_secret=secret, scope=oidc_scope)

    return Auth.client_password(
        username=conn.login or "", password=conn.password or "", scope=oidc_scope
    )
@cached_property
def conn(self) -> WeaviateClient:
    """Returns a Weaviate client."""
    # cached_property: the client is created once per hook instance and reused.
    return self.get_conn()
def test_connection(self) -> tuple[bool, str]:
    """Probe the server by listing collections; return (success, message)."""
    try:
        client = self.conn
        client.collections.list_all()
    except Exception as exc:
        self.log.error("Error testing Weaviate connection: %s", exc)
        return False, str(exc)
    return True, "Connection established!"
def create_collection(self, name: str, **kwargs) -> Collection:
    """Create a new collection and return its handle."""
    return self.conn.collections.create(name=name, **kwargs)
def get_collection(self, name: str) -> Collection:
    """Return the handle for an existing collection.

    :param name: The name of the collection to get.
    """
    return self.conn.collections.get(name)
def delete_by_property(
    self,
    *,
    collection_names: list[str] | str,
    filter_criteria: _Filters,
    if_error: str | None = None,
    dry_run: bool = False,
    verbose: bool = False,
) -> list[str] | None:
    """
    Delete objects in collections using a provided Filter object. The maximum number of objects that can be deleted at once should be set through environment variable `QUERY_MAXIMUM_RESULTS`.

    :param collection_names: The name(s) of the collection(s) to delete from.
    :param filter_criteria: A `Filter` object defining the filter criteria for deletion.
    :param if_error: define the actions to be taken if there is an error while deleting objects, possible
        options are `None` and `continue`
    :param dry_run: Use 'dry_run' to check how many objects would be deleted, without actually performing the deletion.
    :param verbose: Set output to 'verbose' to see more details (ID and deletion status) for each deletion
    :return: If `if_error="continue"`, returns list of failed collection names. Else, returns None.

    Example:
        >>> from weaviate.classes.query import Filter
        >>> my_filter = (
        >>>     Filter.by_property("round").equal("Double Jeopardy!") &
        >>>     Filter.by_property("points").less_than(600)
        >>> )
        >>> delete_by_filter(
        >>>     collection_names=["collection_a", "collection_b"],
        >>>     filter_criteria=my_filter,
        >>>     if_error="continue"
        >>> )
    """
    # Accept a single name or a list of names uniformly.
    collection_names = [collection_names] if isinstance(collection_names, str) else collection_names
    failed_collection_list = []
    for collection_name in collection_names:
        try:
            self.log.info("Attempting to delete objects from '%s'", collection_name)
            # Retry transient HTTP/network failures up to 3 times per collection.
            for attempt in Retrying(
                stop=stop_after_attempt(3),
                retry=(
                    retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
                    | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
                ),
            ):
                with attempt:
                    self.log.info(attempt)
                    collection = self.get_collection(collection_name)
                    delete_many_return = collection.data.delete_many(
                        where=filter_criteria, verbose=verbose, dry_run=dry_run
                    )
                    if dry_run:
                        # Dry runs only report what would have been deleted.
                        self.log.info(delete_many_return)
        except Exception as e:
            # Capture generic exception to avoid missing any error, but we could anticipate the following errors:
            # 1. weaviate.exceptions.UnexpectedStatusCodeException
            # 2. weaviate.exceptions.WeaviateDeleteManyError
            if if_error == "continue":
                # Record the failure and keep going with the next collection.
                self.log.error(e)
                failed_collection_list.append(collection_name)
            else:
                raise e
    if if_error == "continue":
        return failed_collection_list
    return None
def delete_collections(
    self, collection_names: list[str] | str, if_error: str = "stop"
) -> list[str] | None:
    """
    Delete all or specific collections if collection_names are provided.

    :param collection_names: list of collection names to be deleted.
    :param if_error: define the actions to be taken if there is an error while deleting a collection, possible
        options are `stop` and `continue`
    :return: if `if_error=continue` return list of collections which we failed to delete.
        if `if_error=stop` returns None.
    """
    # NOTE(review): uses get_conn() (a fresh client) rather than the cached
    # `self.conn` used elsewhere in this hook — confirm this is intentional.
    client = self.get_conn()
    # Accept a single name or a list of names uniformly.
    collection_names = (
        [collection_names] if collection_names and isinstance(collection_names, str) else collection_names
    )
    failed_collection_list = []
    for collection_name in collection_names:
        try:
            # Retry transient HTTP/network failures up to 3 times per collection.
            for attempt in Retrying(
                stop=stop_after_attempt(3),
                retry=(
                    retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
                    | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
                ),
            ):
                with attempt:
                    self.log.info(attempt)
                    client.collections.delete(collection_name)
        except Exception as e:
            if if_error == "continue":
                # Record the failure and keep deleting the remaining collections.
                self.log.error(e)
                failed_collection_list.append(collection_name)
            elif if_error == "stop":
                raise e
    if if_error == "continue":
        return failed_collection_list
    return None
# Retries the whole call (up to 3 attempts) on retryable HTTP / requests errors.
@retry(
    reraise=True,
    stop=stop_after_attempt(3),
    retry=(
        retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
        | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
    ),
)
def get_collection_configuration(self, collection_name: str) -> CollectionConfig | CollectionConfigSimple:
    """
    Get the collection configuration from Weaviate.

    :param collection_name: The collection for which to return the collection configuration.
    """
    client = self.get_conn()
    return client.collections.get(collection_name).config.get()
def update_collection_configuration(self, collection_name: str, **kwargs) -> None:
    """Update the collection configuration with the given keyword arguments."""
    self.get_collection(collection_name).config.update(**kwargs)
@staticmethod
def _convert_dataframe_to_list(data: list[dict[str, Any]] | pd.DataFrame | None) -> list[dict[str, Any]]:
    """
    Normalize an optional DataFrame into a list of record dicts.

    Pandas is imported lazily so that list input still works in environments
    where pandas is not installed.
    """
    try:
        import pandas
    except ImportError:
        return cast("list[dict[str, Any]]", data)
    if isinstance(data, pandas.DataFrame):
        # Round-trip through JSON to get plain Python records.
        data = json.loads(data.to_json(orient="records"))
    return cast("list[dict[str, Any]]", data)
def batch_create_links(
    self,
    collection_name: str,
    data: list[dict[str, Any]] | pd.DataFrame | None,
    from_property_col: str = "from_property",
    from_uuid_col: str = "from_uuid",
    to_uuid_col: str = "to",
    retry_attempts_per_object: int = 5,
) -> list[ErrorReference] | None:
    """
    Batch create links from an object to another other object through cross-references (https://weaviate.io/developers/weaviate/manage-data/import#import-with-references).

    :param collection_name: The name of the collection containing the source objects.
    :param data: list or dataframe of objects we want to create links.
    :param from_property_col: name of the reference property column.
    :param from_uuid_col: Name of the column containing the from UUID.
    :param to_uuid_col: Name of the column containing the target UUID.
    :param retry_attempts_per_object: number of time to try in case of failure before giving up.
    """
    converted_data = self._convert_dataframe_to_list(data)
    collection = self.get_collection(collection_name)
    with collection.batch.dynamic() as batch:
        # Batch create links
        for data_obj in converted_data:
            for attempt in Retrying(
                stop=stop_after_attempt(retry_attempts_per_object),
                retry=(
                    retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
                    | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
                ),
            ):
                with attempt:
                    # NOTE(review): pop() mutates data_obj, so a retried attempt
                    # would see these keys already removed (values become None)
                    # — confirm whether that is intended.
                    from_property = data_obj.pop(from_property_col, None)
                    from_uuid = data_obj.pop(from_uuid_col, None)
                    to_uuid = data_obj.pop(to_uuid_col, None)
                    self.log.debug(
                        "Attempt %s of create links between %s and %s using reference property %s",
                        attempt.retry_state.attempt_number,
                        from_uuid,
                        to_uuid,
                        from_property,
                    )
                    batch.add_reference(
                        from_property=from_property,
                        from_uuid=from_uuid,
                        to=to_uuid,
                    )
    # Failures are only known after the batch context exits.
    failed_references = collection.batch.failed_references
    if failed_references:
        self.log.error("Number of failed imports: %s", len(failed_references))
    return failed_references
def batch_data(
    self,
    collection_name: str,
    data: list[dict[str, Any]] | pd.DataFrame | None,
    vector_col: str = "Vector",
    uuid_col: str = "id",
    retry_attempts_per_object: int = 5,
    references: ReferenceInputs | None = None,
) -> None:
    """
    Add multiple objects or object references at once into weaviate.

    :param collection_name: The name of the collection that objects belongs to.
    :param data: list or dataframe of objects we want to add.
    :param vector_col: name of the column containing the vector.
    :param uuid_col: Name of the column containing the UUID.
    :param retry_attempts_per_object: number of time to try in case of failure before giving up.
    :param references: The references of the object to be added as a dictionary. Use `wvc.Reference.to` to create the correct values in the dict.
    """
    converted_data = self._convert_dataframe_to_list(data)
    collection = self.get_collection(collection_name)
    with collection.batch.dynamic() as batch:
        # Batch import all data
        for data_obj in converted_data:
            for attempt in Retrying(
                stop=stop_after_attempt(retry_attempts_per_object),
                retry=(
                    retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
                    | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
                ),
            ):
                with attempt:
                    # NOTE(review): pop() mutates data_obj, so a retried attempt
                    # would see vector/uuid already removed — confirm intended.
                    vector = data_obj.pop(vector_col, None)
                    uuid = data_obj.pop(uuid_col, None)
                    self.log.debug(
                        "Attempt %s of inserting object with uuid: %s",
                        attempt.retry_state.attempt_number,
                        uuid,
                    )
                    # Remaining keys in data_obj are the object's properties.
                    batch.add_object(
                        properties=data_obj,
                        references=references,
                        uuid=uuid,
                        vector=vector,
                    )
                    self.log.debug("Inserted object with uuid: %s into batch", uuid)
def query_with_vector(
    self,
    embeddings: list[float],
    collection_name: str,
    properties: list[str],
    certainty: float = 0.7,
    limit: int = 1,
    **kwargs,
) -> QuerySearchReturnType:
    """
    Query the Weaviate database with a caller-supplied vector.

    Performs a near-vector search: the provided embedding itself is the query,
    which is what you need when the collection uses a custom, external
    vectorizer (e.g. OpenAI) instead of a server-side one.
    """
    target = self.conn.collections.get(collection_name)
    return target.query.near_vector(
        near_vector=embeddings, certainty=certainty, limit=limit, return_properties=properties, **kwargs
    )
def query_with_text(
    self, search_text: str, collection_name: str, properties: list[str], limit: int = 1, **kwargs
) -> QuerySearchReturnType:
    """
    Query the Weaviate database with free text.

    Performs a near-text search: Weaviate converts ``search_text`` into a
    vector through the configured inference API and uses that vector as the
    basis for the vector search.
    """
    target = self.conn.collections.get(collection_name)
    return target.query.near_text(
        query=search_text, limit=limit, return_properties=properties, **kwargs
    )
def create_object(self, data_object: dict, collection_name: str, **kwargs) -> UUID | None:
    """
    Create a new object.

    :param data_object: Object to be added. If type is str it should be either a URL or a file.
    :param collection_name: Collection name associated with the object given.
    :param kwargs: Additional parameters to be passed to weaviate_client.data_object.create()
    :return: the UUID of the inserted object, or None if an object with that
        UUID already exists.
    """
    collection = self.get_collection(collection_name)
    # generate deterministic uuid if not provided
    uuid = kwargs.pop("uuid", generate_uuid5(data_object))
    try:
        return collection.data.insert(properties=data_object, uuid=uuid, **kwargs)
    except ObjectAlreadyExistsException:
        # Duplicate insert is treated as a no-op, not an error.
        self.log.warning("Object with the UUID %s already exists", uuid)
        return None
def get_or_create_object(
    self,
    collection_name,
    data_object: dict,
    vector: Sequence | None = None,
    **kwargs,
) -> QueryReturnType | UUID | None:
    """
    Get or Create a new object.

    Returns the object if already exists, return UUID if not

    :param collection_name: Collection name associated with the object given.
    :param data_object: Object to be added.
    :param vector: Vector associated with the object given. This argument is only used when creating object.
    :param kwargs: parameters to be passed to collection.data.fetch_object_by_id() or
        collection.data.fetch_objects()
    """
    obj = self.get_object(collection_name=collection_name, **kwargs)
    # NOTE(review): existence is decided by the truthiness of the query result
    # object — confirm QueryReturnType is falsy when no objects matched.
    if not obj:
        if not (data_object and collection_name):
            raise ValueError("data_object and collection are required to create a new object")
        # Deterministic UUID derived from the object content unless supplied.
        uuid = kwargs.pop("uuid", generate_uuid5(data_object))
        return self.create_object(
            data_object=data_object, collection_name=collection_name, uuid=uuid, vector=vector, **kwargs
        )
    return obj
def get_object(self, collection_name: str, **kwargs) -> QueryReturnType:
    """
    Fetch objects from a Weaviate collection.

    :param kwargs: parameters forwarded to ``collection.query.fetch_objects()``.
    """
    return self.get_collection(collection_name).query.fetch_objects(**kwargs)
def get_all_objects(
    self, collection_name: str, after: str | UUID | None = None, as_dataframe: bool = False, **kwargs
) -> list[Object] | pd.DataFrame:
    """
    Get all objects from weaviate.

    if after is provided, it will be used as the starting point for the listing.

    :param after: uuid of the object to start listing from
    :param as_dataframe: if True, returns a pandas dataframe
    :param kwargs: parameters to be passed to weaviate_client.data_object.get()
    """
    all_objects: list[Object] = []
    # Allow `after` to be supplied either as the named parameter or via kwargs.
    after = kwargs.pop("after", after)
    # Cursor-based pagination: keep fetching until a page comes back empty.
    while True:
        results = self.get_object(collection_name=collection_name, after=after, **kwargs)
        if not results or not results.objects:
            break
        all_objects.extend(results.objects)
        # Next page starts after the last UUID seen.
        after = results.objects[-1].uuid
    if as_dataframe:
        import pandas

        # '_WeaviateUUIDInt' object has no attribute 'is_safe' which causes error
        return pandas.DataFrame(
            [
                {
                    "collection": obj.collection,
                    "metadata": obj.metadata,
                    "properties": obj.properties,
                    "references": obj.references,
                    "uuid": str(obj.uuid),
                    "vector": obj.vector,
                }
                for obj in all_objects
            ]
        )
    return all_objects
def delete_object(self, collection_name: str, uuid: UUID | str) -> bool:
    """
    Delete a single object by UUID.

    :param collection_name: Collection name associated with the object given.
    :param uuid: uuid of the object to be deleted
    :return: whether the deletion succeeded.
    """
    return self.get_collection(collection_name).data.delete_by_id(uuid=uuid)
def update_object(
    self, collection_name: str, uuid: UUID | str, properties: Properties | None = None, **kwargs
) -> None:
    """
    Partially update an object's properties in weaviate.

    :param collection_name: Collection name associated with the object given.
    :param uuid: uuid of the object to be updated
    :param properties: The properties of the object.
    :param kwargs: Optional parameters to be passed to collection.data.update()
    """
    target = self.get_collection(collection_name)
    target.data.update(uuid=uuid, properties=properties, **kwargs)
def replace_object(
    self,
    collection_name: str,
    uuid: UUID | str,
    properties: Properties,
    references: ReferenceInputs | None = None,
    **kwargs,
) -> None:
    """
    Fully replace an object in weaviate.

    :param collection_name: Collection name associated with the object given.
    :param uuid: uuid of the object to be updated
    :param properties: The properties of the object.
    :param references: Any references to other objects in Weaviate.
    :param kwargs: Optional parameters to be passed to collection.data.replace()
    """
    target = self.get_collection(collection_name)
    target.data.replace(uuid=uuid, properties=properties, references=references, **kwargs)
def object_exists(self, collection_name: str, uuid: str | UUID) -> bool:
    """
    Return True if an object with the given UUID exists in the collection.

    :param collection_name: Collection name associated with the object given.
    :param uuid: The UUID of the object that may or may not exist within Weaviate.
    """
    return self.get_collection(collection_name).data.exists(uuid=uuid)
def _delete_objects(
    self, uuids: list[UUID], collection_name: str, retry_attempts_per_object: int = 5
) -> None:
    """
    Delete multiple objects.

    Helper function for `create_or_replace_objects()` to delete multiple objects.

    :param uuids: Collection of uuids.
    :param collection_name: Name of the collection in Weaviate schema where data is to be ingested.
    :param retry_attempts_per_object: number of times to try in case of failure before giving up.
    """
    for uuid in uuids:
        # Retry transient HTTP/network failures per object.
        for attempt in Retrying(
            stop=stop_after_attempt(retry_attempts_per_object),
            retry=(
                retry_if_exception(lambda exc: check_http_error_is_retryable(exc))
                | retry_if_exception_type(REQUESTS_EXCEPTIONS_TYPES)
            ),
        ):
            with attempt:
                try:
                    self.delete_object(uuid=uuid, collection_name=collection_name)
                    self.log.debug("Deleted object with uuid %s", uuid)
                except weaviate.exceptions.UnexpectedStatusCodeException as e:
                    if e.status_code == 404:
                        # Already gone: treat as success.
                        self.log.debug("Tried to delete a non existent object with uuid %s", uuid)
                    else:
                        self.log.debug("Error occurred while trying to delete object with uuid %s", uuid)
                        raise e
    self.log.info("Deleted %s objects.", len(uuids))
def _generate_uuids(
    self,
    df: pd.DataFrame,
    collection_name: str,
    unique_columns: list[str],
    vector_column: str | None = None,
    uuid_column: str | None = None,
) -> tuple[pd.DataFrame, str]:
    """
    Add UUIDs to a DataFrame, useful for replace operations where UUIDs must be known before ingestion.

    By default, UUIDs are generated using a custom function if 'uuid_column' is not specified.
    The function can potentially ingest the same data multiple times with different UUIDs.

    :param df: A dataframe with data to generate a UUID from.
    :param collection_name: The name of the collection use as part of the uuid namespace.
    :param uuid_column: Name of the column to create. Default is 'id'.
    :param unique_columns: A list of columns to use for UUID generation. By default, all columns except
        vector_column will be used.
    :param vector_column: Name of the column containing the vector data. If specified the vector will be
        removed prior to generating the uuid.
    :return: the (mutated) dataframe and the name of the UUID column.
    """
    column_names = df.columns.to_list()
    difference_columns = set(unique_columns).difference(set(df.columns.to_list()))
    if difference_columns:
        raise ValueError(f"Columns {', '.join(difference_columns)} don't exist in dataframe")
    if uuid_column is None:
        self.log.info("No uuid_column provided. Generating UUIDs as column name `id`.")
        if "id" in column_names:
            raise ValueError(
                "Property 'id' already in dataset. Consider renaming or specify 'uuid_column'."
            )
        uuid_column = "id"
    # For a caller-supplied uuid_column this rejects clashes with existing
    # columns; when uuid_column defaulted to "id" the check above already ran.
    if uuid_column in column_names:
        raise ValueError(
            f"Property {uuid_column} already in dataset. Consider renaming or specify a different"
            f" 'uuid_column'."
        )
    # Deterministic UUIDv5 per row, namespaced by collection, excluding the
    # vector column from the identity.
    df[uuid_column] = (
        df[unique_columns]
        .drop(columns=[vector_column], inplace=False, errors="ignore")
        .apply(lambda row: generate_uuid5(identifier=row.to_dict(), namespace=collection_name), axis=1)
    )
    return df, uuid_column
def _get_documents_to_uuid_map(
    self,
    data: pd.DataFrame,
    document_column: str,
    uuid_column: str,
    collection_name: str,
    offset: int = 0,
    limit: int = 2000,
) -> dict[str, set]:
    """
    Get the document to uuid map of existing objects in db.

    :param data: A single pandas DataFrame.
    :param document_column: The name of the property to query.
    :param collection_name: The name of the collection to query.
    :param uuid_column: The name of the column containing the UUID.
    :param offset: pagination parameter to indicate the which object to start fetching data.
    :param limit: pagination param to indicate the number of records to fetch from start object.
    """
    documents_to_uuid: dict = {}
    document_keys = set(data[document_column])
    # Page through all objects whose document key appears in the input data.
    while True:
        collection = self.get_collection(collection_name)
        data_objects = collection.query.fetch_objects(
            filters=Filter.any_of(
                [Filter.by_property(document_column).equal(key) for key in document_keys]
            ),
            return_properties=[document_column],
            limit=limit,
            offset=offset,
        )
        if len(data_objects.objects) == 0:
            break
        offset = offset + limit
        if uuid_column in data_objects.objects[0].properties:
            # UUID is stored as a regular property.
            data_object_properties = [obj.properties for obj in data_objects.objects]
        else:
            # Fall back to the object's own UUID, merged into a copy of its
            # properties so downstream grouping sees a uniform shape.
            data_object_properties = []
            for obj in data_objects.objects:
                row = dict(obj.properties)
                row[uuid_column] = str(obj.uuid)
                data_object_properties.append(row)
        documents_to_uuid.update(
            self._prepare_document_to_uuid_map(
                data=data_object_properties,
                group_key=document_column,
                get_value=lambda x: x[uuid_column],
            )
        )
    return documents_to_uuid
@staticmethod
def _prepare_document_to_uuid_map(
    data: Sequence[Mapping], group_key: str, get_value: Callable[[Mapping], str]
) -> dict[str, set]:
    """Group rows by ``group_key``, collecting ``get_value(row)`` into a set per key."""
    grouped: dict[str, set] = {}
    for row in data:
        grouped.setdefault(row[group_key], set()).add(get_value(row))
    return grouped
def _get_segregated_documents(
    self, data: pd.DataFrame, document_column: str, collection_name: str, uuid_column: str
) -> tuple[dict[str, set], set, set, set]:
    """
    Segregate documents into changed, unchanged and new document, when compared to Weaviate db.

    :param data: A single pandas DataFrame.
    :param document_column: The name of the property to query.
    :param collection_name: The name of the collection to query.
    :param uuid_column: The name of the column containing the UUID.
    :return: (existing document->uuid-set map, changed, unchanged, new document keys).
    """
    changed_documents = set()
    unchanged_docs = set()
    new_documents = set()
    # What the database currently holds for these documents...
    existing_documents_to_uuid = self._get_documents_to_uuid_map(
        data=data,
        uuid_column=uuid_column,
        document_column=document_column,
        collection_name=collection_name,
    )
    # ...versus what the incoming data would produce.
    input_documents_to_uuid = self._prepare_document_to_uuid_map(
        data=data.to_dict("records"),
        group_key=document_column,
        get_value=lambda x: x[uuid_column],
    )
    # segregate documents into changed, unchanged and non-existing documents.
    # A document is "changed" when its set of (content-derived) UUIDs differs.
    for doc_url, doc_set in input_documents_to_uuid.items():
        if doc_url in existing_documents_to_uuid:
            if existing_documents_to_uuid[doc_url] != doc_set:
                changed_documents.add(str(doc_url))
            else:
                unchanged_docs.add(str(doc_url))
        else:
            new_documents.add(str(doc_url))
    return existing_documents_to_uuid, changed_documents, unchanged_docs, new_documents
def _delete_all_documents_objects(
    self,
    document_keys: list[str],
    document_column: str,
    collection_name: str,
    total_objects_count: int = 1,
    batch_delete_error: Sequence | None = None,
    verbose: bool = False,
) -> Sequence[dict[str, UUID | str]]:
    """
    Delete all object that belong to list of documents.

    :param document_keys: list of unique documents identifiers.
    :param document_column: Column in DataFrame that identifying source document.
    :param collection_name: Name of the collection in Weaviate schema where data is to be ingested.
    :param total_objects_count: total number of objects to delete, needed as max limit on one delete
        query is 10,000, if we have more objects to delete we need to run query multiple times.
    :param batch_delete_error: list to hold errors while inserting.
    :param verbose: Flag to enable verbose output during the ingestion process.
    :return: list of ``{"uuid": ..., "error": ...}`` entries for failed deletions.
    """
    batch_delete_error = batch_delete_error or []
    # This limit is imposed by Weavaite database
    MAX_LIMIT_ON_TOTAL_DELETABLE_OBJECTS = 10000
    collection = self.get_collection(collection_name)
    delete_many_return = collection.data.delete_many(
        where=Filter.any_of([Filter.by_property(document_column).equal(key) for key in document_keys]),
        verbose=verbose,
        dry_run=False,
    )
    # NOTE(review): total_objects_count is decremented here but never used
    # afterwards — the multi-pass deletion the docstring describes is not
    # implemented in this body; confirm whether a loop is missing.
    total_objects_count = total_objects_count - MAX_LIMIT_ON_TOTAL_DELETABLE_OBJECTS
    matched_objects = delete_many_return.matches
    if delete_many_return.failed > 0 and delete_many_return.objects:
        # Rebuild the error list from the per-object results of this call.
        batch_delete_error = [
            {"uuid": obj.uuid, "error": obj.error}
            for obj in delete_many_return.objects
            if obj.error is not None
        ]
    if verbose:
        self.log.info("Deleted %s Objects", matched_objects)
    return batch_delete_error
def create_or_replace_document_objects(
    self,
    data: pd.DataFrame | list[dict[str, Any]] | list[pd.DataFrame],
    collection_name: str,
    document_column: str,
    existing: str = "skip",
    uuid_column: str | None = None,
    vector_column: str = "Vector",
    verbose: bool = False,
) -> Sequence[dict[str, UUID | str] | None]:
    """
    Create or replace objects belonging to documents.

    In real-world scenarios, information sources like Airflow docs, Stack Overflow, or other issues
    are considered 'documents' here. It's crucial to keep the database objects in sync with these sources.
    If any changes occur in these documents, this function aims to reflect those changes in the database.

    .. note::

        This function assumes responsibility for identifying changes in documents, dropping relevant
        database objects, and recreating them based on updated information. It's crucial to handle this
        process with care, ensuring backups and validation are in place to prevent data loss or
        inconsistencies.

    Provides users with multiple ways of dealing with existing values.
    replace: replace the existing objects with new objects. This option requires to identify the
    objects belonging to a document. which by default is done by using document_column field.
    skip: skip the existing objects and only add the missing objects of a document.
    error: raise an error if an object belonging to a existing document is tried to be created.

    :param data: A single pandas DataFrame or a list of dicts to be ingested.
    :param collection_name: Name of the collection in Weaviate schema where data is to be ingested.
    :param existing: Strategy for handling existing data: 'skip', 'replace' or 'error'. Default is 'skip'.
    :param document_column: Column in DataFrame that identifying source document.
    :param uuid_column: Column with pre-generated UUIDs. If not provided, UUIDs will be generated.
    :param vector_column: Column with embedding vectors for pre-embedded data.
    :param verbose: Flag to enable verbose output during the ingestion process.
    :return: list of UUID which failed to create
    """
    if existing not in ["skip", "replace", "error"]:
        raise ValueError("Invalid parameter for 'existing'. Choices are 'skip', 'replace', 'error'.")

    import pandas as pd

    if len(data) == 0:
        return []
    # Normalize the three accepted input shapes into a single DataFrame.
    if isinstance(data, Sequence) and isinstance(data[0], dict):
        # This is done to narrow the type to list[dict[str, Any].
        data = pd.json_normalize(cast("list[dict[str, Any]]", data))
    elif isinstance(data, Sequence) and isinstance(data[0], pd.DataFrame):
        # This is done to narrow the type to list[pd.DataFrame].
        data = pd.concat(cast("list[pd.DataFrame]", data), ignore_index=True)
    else:
        data = cast("pd.DataFrame", data)
    unique_columns = sorted(data.columns.to_list())
    if verbose:
        self.log.info("%s objects came in for insertion.", data.shape[0])
    # Content-derived UUIDs make equality checks against the DB possible.
    if uuid_column is None or uuid_column not in data.columns:
        (
            data,
            uuid_column,
        ) = self._generate_uuids(
            df=data,
            collection_name=collection_name,
            unique_columns=unique_columns,
            vector_column=vector_column,
            uuid_column=uuid_column,
        )
    # drop duplicate rows, using uuid_column and unique_columns. Removed `None` as it can be added to
    # set when `uuid_column` is None.
    data = data.drop_duplicates(subset=[document_column, uuid_column], keep="first")
    if verbose:
        self.log.info("%s objects remain after deduplication.", data.shape[0])
    batch_delete_error: Sequence[dict[str, UUID | str]] = []
    (
        documents_to_uuid_map,
        changed_documents,
        unchanged_documents,
        new_documents,
    ) = self._get_segregated_documents(
        data=data,
        document_column=document_column,
        uuid_column=uuid_column,
        collection_name=collection_name,
    )
    if verbose:
        self.log.info(
            "Found %s changed documents, %s unchanged documents and %s non-existing documents",
            len(changed_documents),
            len(unchanged_documents),
            len(new_documents),
        )
        for document in changed_documents:
            self.log.info(
                "Changed document: %s has %s objects.", document, len(documents_to_uuid_map[document])
            )
        self.log.info("Non-existing document: %s", ", ".join(new_documents))
    if existing == "error" and len(changed_documents):
        raise ValueError(
            f"Documents {', '.join(changed_documents)} already exists. You can either skip or replace"
            f" them by passing 'existing=skip' or 'existing=replace' respectively."
        )
    if existing == "skip":
        data = data[data[document_column].isin(new_documents)]
        if verbose:
            self.log.info(
                "Since existing=skip, ingesting only non-existing document's object %s", data.shape[0]
            )
    elif existing == "replace":
        # Generator instead of an intermediate list: sum() consumes it lazily.
        total_objects_count = sum(len(documents_to_uuid_map[doc]) for doc in changed_documents)
        if verbose:
            self.log.info(
                "Since existing='replace', deleting %s objects belonging changed documents %s",
                total_objects_count,
                changed_documents,
            )
        if list(changed_documents):
            batch_delete_error = self._delete_all_documents_objects(
                document_keys=list(changed_documents),
                document_column=document_column,
                collection_name=collection_name,
                total_objects_count=total_objects_count,
                batch_delete_error=batch_delete_error,
                verbose=verbose,
            )
        data = data[data[document_column].isin(new_documents.union(changed_documents))]
    self.log.info("Batch inserting %s objects for non-existing and changed documents.", data.shape[0])
    if data.shape[0]:
        self.batch_data(
            collection_name=collection_name,
            data=data,
            vector_col=vector_column,
            uuid_col=uuid_column,
        )
    # Fix: the original nested a second, identical `if batch_delete_error:`
    # inside this one — redundant duplicate condition removed.
    if batch_delete_error:
        self.log.info("Failed to delete %s objects.", len(batch_delete_error))
        # Rollback object that were not created properly
        self._delete_objects(
            [item["uuid"] for item in batch_delete_error],
            collection_name=collection_name,
        )
    if verbose:
        collection = self.get_collection(collection_name)
        self.log.info(
            "Total objects in collection %s : %s ",
            collection_name,
            collection.aggregate.over_all(total_count=True),
        )
    return batch_delete_error
| WeaviateHook |
python | pola-rs__polars | py-polars/src/polars/series/binary.py | {
"start": 364,
"end": 6241
} | class ____:
"""Series.bin namespace."""
_accessor = "bin"
def __init__(self, series: Series) -> None:
self._s: PySeries = series._s
def contains(self, literal: IntoExpr) -> Series:
r"""
Check if binaries in Series contain a binary substring.
Parameters
----------
literal
The binary substring to look for
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("colors", [b"\x00\x00\x00", b"\xff\xff\x00", b"\x00\x00\xff"])
>>> s.bin.contains(b"\xff")
shape: (3,)
Series: 'colors' [bool]
[
false
true
true
]
"""
def ends_with(self, suffix: IntoExpr) -> Series:
r"""
Check if string values end with a binary substring.
Parameters
----------
suffix
Suffix substring.
Examples
--------
>>> s = pl.Series("colors", [b"\x00\x00\x00", b"\xff\xff\x00", b"\x00\x00\xff"])
>>> s.bin.ends_with(b"\x00")
shape: (3,)
Series: 'colors' [bool]
[
true
true
false
]
"""
def starts_with(self, prefix: IntoExpr) -> Series:
r"""
Check if values start with a binary substring.
Parameters
----------
prefix
Prefix substring.
Examples
--------
>>> s = pl.Series("colors", [b"\x00\x00\x00", b"\xff\xff\x00", b"\x00\x00\xff"])
>>> s.bin.starts_with(b"\x00")
shape: (3,)
Series: 'colors' [bool]
[
true
false
true
]
"""
def decode(self, encoding: TransferEncoding, *, strict: bool = True) -> Series:
r"""
Decode values using the provided encoding.
Parameters
----------
encoding : {'hex', 'base64'}
The encoding to use.
strict
Raise an error if the underlying value cannot be decoded,
otherwise mask out with a null value.
Returns
-------
Series
Series of data type :class:`String`.
Examples
--------
Decode values using hexadecimal encoding.
>>> s = pl.Series("colors", [b"000000", b"ffff00", b"0000ff"])
>>> s.bin.decode("hex")
shape: (3,)
Series: 'colors' [binary]
[
b"\x00\x00\x00"
b"\xff\xff\x00"
b"\x00\x00\xff"
]
Decode values using Base64 encoding.
>>> s = pl.Series("colors", [b"AAAA", b"//8A", b"AAD/"])
>>> s.bin.decode("base64")
shape: (3,)
Series: 'colors' [binary]
[
b"\x00\x00\x00"
b"\xff\xff\x00"
b"\x00\x00\xff"
]
Set `strict=False` to set invalid values to null instead of raising an error.
>>> s = pl.Series("colors", [b"000000", b"ffff00", b"invalid_value"])
>>> s.bin.decode("hex", strict=False)
shape: (3,)
Series: 'colors' [binary]
[
b"\x00\x00\x00"
b"\xff\xff\x00"
null
]
"""
def encode(self, encoding: TransferEncoding) -> Series:
r"""
Encode values using the provided encoding.
Parameters
----------
encoding : {'hex', 'base64'}
The encoding to use.
Returns
-------
Series
Series of data type :class:`String`.
Examples
--------
Encode values using hexadecimal encoding.
>>> s = pl.Series("colors", [b"\x00\x00\x00", b"\xff\xff\x00", b"\x00\x00\xff"])
>>> s.bin.encode("hex")
shape: (3,)
Series: 'colors' [str]
[
"000000"
"ffff00"
"0000ff"
]
Encode values using Base64 encoding.
>>> s.bin.encode("base64")
shape: (3,)
Series: 'colors' [str]
[
"AAAA"
"//8A"
"AAD/"
]
"""
def size(self, unit: SizeUnit = "b") -> Series:
r"""
Get the size of the binary values in a Series in the given unit.
Returns
-------
Series
Series of data type :class:`UInt32`.
Examples
--------
>>> from os import urandom
>>> s = pl.Series("data", [urandom(n) for n in (512, 256, 2560, 1024)])
>>> s.bin.size("kb")
shape: (4,)
Series: 'data' [f64]
[
0.5
0.25
2.5
1.0
]
"""
def reinterpret(
self, *, dtype: PolarsDataType, endianness: Endianness = "little"
) -> Series:
r"""
Interpret bytes as another type.
Supported types are numerical or temporal dtypes, or an ``Array`` of
these dtypes.
Parameters
----------
dtype : PolarsDataType
Which type to interpret binary column into.
endianness : {"big", "little"}, optional
Which endianness to use when interpreting bytes, by default "little".
Returns
-------
Series
Series of data type `dtype`.
Note that rows of the binary array where the length does not match
the size in bytes of the output array (number of items * byte size
of item) will become NULL.
Examples
--------
>>> s = pl.Series("data", [b"\x05\x00\x00\x00", b"\x10\x00\x01\x00"])
>>> s.bin.reinterpret(dtype=pl.Int32, endianness="little")
shape: (2,)
Series: 'data' [i32]
[
5
65552
]
"""
| BinaryNameSpace |
python | huggingface__transformers | tests/models/data2vec/test_modeling_data2vec_text.py | {
"start": 1649,
"end": 13981
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return Data2VecTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = Data2VecTextModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = Data2VecTextModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = Data2VecTextForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = Data2VecTextForCausalLM(config=config).to(torch_device).eval()
# make sure that ids don't start with pad token
mask = input_ids.ne(config.pad_token_id).long()
input_ids = input_ids * mask
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
# make sure that ids don't start with pad token
mask = next_tokens.ne(config.pad_token_id).long()
next_tokens = next_tokens * mask
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = Data2VecTextForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = Data2VecTextForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = Data2VecTextForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = Data2VecTextForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| Data2VecTextModelTester |
python | networkx__networkx | networkx/algorithms/tests/test_summarization.py | {
"start": 11804,
"end": 14331
} | class ____(AbstractSNAP):
def build_original_graph(self):
nodes = {
"A": {"color": "Red"},
"B": {"color": "Red"},
"C": {"color": "Red"},
"D": {"color": "Red"},
"E": {"color": "Blue"},
"F": {"color": "Blue"},
"G": {"color": "Blue"},
"H": {"color": "Blue"},
"I": {"color": "Yellow"},
"J": {"color": "Yellow"},
"K": {"color": "Yellow"},
"L": {"color": "Yellow"},
}
edges = [
("A", "B", "Strong"),
("A", "C", "Weak"),
("A", "E", "Strong"),
("A", "I", "Weak"),
("B", "D", "Weak"),
("B", "J", "Weak"),
("B", "F", "Strong"),
("C", "G", "Weak"),
("D", "H", "Weak"),
("I", "J", "Strong"),
("J", "K", "Strong"),
("I", "L", "Strong"),
]
G = nx.Graph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target, type in edges:
G.add_edge(source, target, type=type)
return G
def build_summary_graph(self):
nodes = {
"Supernode-0": {"color": "Red"},
"Supernode-1": {"color": "Red"},
"Supernode-2": {"color": "Blue"},
"Supernode-3": {"color": "Blue"},
"Supernode-4": {"color": "Yellow"},
"Supernode-5": {"color": "Yellow"},
}
edges = [
("Supernode-0", "Supernode-0", "Strong"),
("Supernode-0", "Supernode-1", "Weak"),
("Supernode-0", "Supernode-2", "Strong"),
("Supernode-0", "Supernode-4", "Weak"),
("Supernode-1", "Supernode-3", "Weak"),
("Supernode-4", "Supernode-4", "Strong"),
("Supernode-4", "Supernode-5", "Strong"),
]
G = nx.Graph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target, type in edges:
G.add_edge(source, target, types=[{"type": type}])
supernodes = {
"Supernode-0": {"A", "B"},
"Supernode-1": {"C", "D"},
"Supernode-2": {"E", "F"},
"Supernode-3": {"G", "H"},
"Supernode-4": {"I", "J"},
"Supernode-5": {"K", "L"},
}
nx.set_node_attributes(G, supernodes, "group")
return G
| TestSNAPUndirected |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 22410,
"end": 22761
} | class ____(_FunctionCaller):
"""Specialization of _Node to If-like operations."""
def __init__(self, node, function, enclosing_graph):
super(_If, self).__init__(
node,
function,
enclosing_graph,
first_function_input=1,
type_attribute="Tin",
function_attributes=["then_branch", "else_branch"])
| _If |
python | numba__numba | numba/core/typing/npydecl.py | {
"start": 7336,
"end": 9295
} | class ____(Numpy_rules_ufunc):
_op_map = {
operator.add: "add",
operator.sub: "subtract",
operator.mul: "multiply",
operator.truediv: "true_divide",
operator.floordiv: "floor_divide",
operator.mod: "remainder",
operator.pow: "power",
operator.lshift: "left_shift",
operator.rshift: "right_shift",
operator.and_: "bitwise_and",
operator.or_: "bitwise_or",
operator.xor: "bitwise_xor",
operator.eq: "equal",
operator.gt: "greater",
operator.ge: "greater_equal",
operator.lt: "less",
operator.le: "less_equal",
operator.ne: "not_equal",
}
@property
def ufunc(self):
return getattr(np, self._op_map[self.key])
@classmethod
def install_operations(cls):
for op, ufunc_name in cls._op_map.items():
infer_global(op)(
type("NumpyRulesArrayOperator_" + ufunc_name, (cls,), dict(key=op))
)
def generic(self, args, kws):
'''Overloads and calls base class generic() method, returning
None if a TypingError occurred.
Returning None for operators is important since operators are
heavily overloaded, and by suppressing type errors, we allow
type inference to check other possibilities before giving up
(particularly user-defined operators).
'''
try:
sig = super(NumpyRulesArrayOperator, self).generic(args, kws)
except TypingError:
return None
if sig is None:
return None
args = sig.args
# Only accept at least one array argument, otherwise the operator
# doesn't involve Numpy's ufunc machinery.
if not any(isinstance(arg, types.ArrayCompatible)
for arg in args):
return None
return sig
_binop_map = NumpyRulesArrayOperator._op_map
| NumpyRulesArrayOperator |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/base.py | {
"start": 567,
"end": 644
} | class ____(Endpoint):
permission_classes = (AuthV2Permission,)
| AuthV2Endpoint |
python | catalyst-team__catalyst | catalyst/contrib/layers/common.py | {
"start": 137,
"end": 488
} | class ____(nn.Module):
"""Flattens the input. Does not affect the batch size.
@TODO: Docs (add `Example`). Contribution is welcome.
"""
def __init__(self):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
def forward(self, x):
"""Forward call."""
return x.view(x.shape[0], -1)
| Flatten |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg.py | {
"start": 8087,
"end": 8135
} | class ____(JSONPathType):
pass
| _PGJSONPathType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform1.py | {
"start": 707,
"end": 779
} | class ____:
id: int
name: str
@create_model(frozen=True)
| Customer1 |
python | pytorch__pytorch | torch/_inductor/codegen/memory_planning.py | {
"start": 18593,
"end": 19723
} | class ____(PoolMemoryPlanningLine):
"""Similar to AllocationLine, but takes memory from a pool"""
is_first_pool_usage: bool = False
def codegen(self, code: IndentedBuffer):
allocation = self.group.allocation
assert allocation and allocation.pool
pool = allocation.pool
name = self.node.get_name()
if self.is_first_pool_usage:
pool.codegen_create(self.wrapper, code)
pool.names_to_del.extend(self.group.names)
alloc_from_pool, allocation_lines_to_write = allocation.codegen_alloc_from_pool(
self.wrapper
)
code.writelines(allocation_lines_to_write)
if alloc_from_pool in pool.creation_cache:
code.writeline(
self.wrapper.make_tensor_alias(
name, pool.creation_cache[alloc_from_pool], "alloc"
)
)
else:
pool.creation_cache[alloc_from_pool] = name
code.writeline(
f"{self.wrapper.declare}{name} = {alloc_from_pool}{self.wrapper.ending}"
)
@dataclasses.dataclass
| AllocFromPoolLine |
python | kamyu104__LeetCode-Solutions | Python/divisor-game.py | {
"start": 36,
"end": 650
} | class ____(object):
def divisorGame(self, n):
"""
:type n: int
:rtype: bool
"""
# 1. if we get an even, we can choose x = 1
# to make the opponent always get an odd
# 2. if the opponent gets an odd, he can only choose x = 1 or other odds
# and we can still get an even
# 3. at the end, the opponent can only choose x = 1 and we win
# 4. in summary, we win if only if we get an even and
# keeps even until the opponent loses
return n % 2 == 0
# Time: O(nlogn)
# Space: O(nlogn)
# dp, number theory
| Solution |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/javaw.py | {
"start": 6583,
"end": 6883
} | class ____(Task.Task):
def split_argfile(self, cmd):
inline = [cmd[0]]
infile = []
for x in cmd[1:]:
if x.startswith('-J'):
inline.append(x)
else:
infile.append(self.quote_flag(x))
return (inline, infile)
| JTask |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 85600,
"end": 85904
} | class ____:
@pytest.mark.xfail(reason="__array_ufunc__ not implemented yet")
def test_tree(self, create_test_datatree):
dt = create_test_datatree()
expected = create_test_datatree(modify=np.sin)
result_tree = np.sin(dt)
assert_equal(result_tree, expected)
| TestUFuncs |
python | getsentry__sentry | src/sentry/workflow_engine/models/data_source_detector.py | {
"start": 187,
"end": 791
} | class ____(DefaultFieldsModel):
"""
Lookup table that maps a DataSource to a Detector. This is used to determine which detectors are available for a given data source.
"""
__relocation_scope__ = RelocationScope.Organization
data_source = FlexibleForeignKey("workflow_engine.DataSource")
detector = FlexibleForeignKey("workflow_engine.Detector")
class Meta:
constraints = [
models.UniqueConstraint(
fields=["data_source", "detector"],
name="workflow_engine_uniq_datasource_detector",
)
]
| DataSourceDetector |
python | optuna__optuna | optuna/terminator/improvement/evaluator.py | {
"start": 3561,
"end": 7976
} | class ____(BaseImprovementEvaluator):
"""An error evaluator for upper bound on the regret with high-probability confidence.
This evaluator evaluates the regret of current best solution, which defined as the difference
between the objective value of the best solution and of the global optimum. To be specific,
this evaluator calculates the upper bound on the regret based on the fact that empirical
estimator of the objective function is bounded by lower and upper confidence bounds with
high probability under the Gaussian process model assumption.
Args:
top_trials_ratio:
A ratio of top trials to be considered when estimating the regret. Default to 0.5.
min_n_trials:
A minimum number of complete trials to estimate the regret. Default to 20.
seed:
Seed for random number generator.
For further information about this evaluator, please refer to the following paper:
- `Automatic Termination for Hyperparameter Optimization <https://proceedings.mlr.press/v188/makarova22a.html>`__
""" # NOQA: E501
def __init__(
self,
top_trials_ratio: float = DEFAULT_TOP_TRIALS_RATIO,
min_n_trials: int = DEFAULT_MIN_N_TRIALS,
seed: int | None = None,
) -> None:
self._top_trials_ratio = top_trials_ratio
self._min_n_trials = min_n_trials
self._log_prior = prior.default_log_prior
self._minimum_noise = prior.DEFAULT_MINIMUM_NOISE_VAR
self._optimize_n_samples = 2048
self._rng = LazyRandomState(seed)
def _get_top_n(
self, normalized_params: np.ndarray, values: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
assert len(normalized_params) == len(values)
n_trials = len(normalized_params)
top_n = np.clip(int(n_trials * self._top_trials_ratio), self._min_n_trials, n_trials)
top_n_val = np.partition(values, n_trials - top_n)[n_trials - top_n]
top_n_mask = values >= top_n_val
return normalized_params[top_n_mask], values[top_n_mask]
def evaluate(self, trials: list[FrozenTrial], study_direction: StudyDirection) -> float:
optuna_search_space = intersection_search_space(trials)
self._validate_input(trials, optuna_search_space)
complete_trials = [t for t in trials if t.state == TrialState.COMPLETE]
# _gp module assumes that optimization direction is maximization
sign = -1 if study_direction == StudyDirection.MINIMIZE else 1
values = np.array([t.value for t in complete_trials]) * sign
search_space = gp_search_space.SearchSpace(optuna_search_space)
normalized_params = search_space.get_normalized_params(complete_trials)
normalized_top_n_params, top_n_values = self._get_top_n(normalized_params, values)
top_n_values_mean = top_n_values.mean()
top_n_values_std = max(1e-10, top_n_values.std())
standarized_top_n_values = (top_n_values - top_n_values_mean) / top_n_values_std
gpr = gp.fit_kernel_params(
X=normalized_top_n_params,
Y=standarized_top_n_values,
is_categorical=search_space.is_categorical,
log_prior=self._log_prior,
minimum_noise=self._minimum_noise,
# TODO(contramundum53): Add option to specify this.
deterministic_objective=False,
# TODO(y0z): Add `kernel_params_cache` to speedup.
gpr_cache=None,
)
standardized_regret_bound = _compute_standardized_regret_bound(
gpr,
search_space,
normalized_top_n_params,
standarized_top_n_values,
rng=self._rng.rng,
)
return standardized_regret_bound * top_n_values_std # regret bound
@classmethod
def _validate_input(
cls, trials: list[FrozenTrial], search_space: dict[str, BaseDistribution]
) -> None:
if len([t for t in trials if t.state == TrialState.COMPLETE]) == 0:
raise ValueError(
"Because no trial has been completed yet, the regret bound cannot be evaluated."
)
if len(search_space) == 0:
raise ValueError(
"The intersection search space is empty. This condition is not supported by "
f"{cls.__name__}."
)
@experimental_class("3.4.0")
| RegretBoundEvaluator |
python | facebookresearch__faiss | tests/test_factory.py | {
"start": 8028,
"end": 8278
} | class ____(unittest.TestCase):
def test_itq_transform(self):
codec = faiss.index_factory(16, "ITQ8,LSHt")
itqt = faiss.downcast_VectorTransform(codec.chain.at(0))
itqt.pca_then_itq
# tests after re-factoring
| TestVTDowncast |
python | Textualize__textual | docs/examples/guide/widgets/tooltip01.py | {
"start": 214,
"end": 546
} | class ____(App):
CSS = """
Screen {
align: center middle;
}
"""
def compose(self) -> ComposeResult:
yield Button("Click me", variant="success")
def on_mount(self) -> None:
self.query_one(Button).tooltip = TEXT
if __name__ == "__main__":
app = TooltipApp()
app.run()
| TooltipApp |
python | plotly__plotly.py | plotly/graph_objs/layout/annotation/_font.py | {
"start": 235,
"end": 9888
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.annotation"
_path_str = "layout.annotation.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the annotation text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.annotation.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.annotation.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.annotation.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_comment12.py | {
"start": 315,
"end": 987
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("comment12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(0, 21)
worksheet.set_column("B:B", 10)
worksheet.write("A1", "Foo")
worksheet.write_comment("A1", "Some text")
worksheet.set_comments_author("John")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/hg_top_level/package.py | {
"start": 217,
"end": 416
} | class ____(Package):
"""Test package that does fetching with mercurial."""
homepage = "http://www.hg-fetch-example.com"
hg = "https://example.com/some/hg/repo"
version("1.0")
| HgTopLevel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.