language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | joke2k__faker | faker/providers/automotive/es_CL/__init__.py | {
"start": 96,
"end": 1965
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``es`` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Chile
"""
license_plate_old_format_first_letters = "ABCDFGHJKLPRSTVWXYZ"
license_plate_old_format_second_letters = "ABCDFGHIJKLPRSTVWXYZ"
license_plate_new_format_letters = "BCDFGHJKLPRSTVWXYZ"
license_formats = OrderedDict(
[
("{{license_plate_new}}", 0.70),
("{{license_plate_old}}", 0.20),
("{{license_plate_police}}", 0.05),
("{{license_plate_temporary}}", 0.04),
("{{license_plate_diplomatic}}", 0.01),
]
)
def license_plate_old(self) -> str:
"""Generate an old format license plate."""
format = "-####"
letters = "".join(
(
self.random_element(self.license_plate_old_format_first_letters),
self.random_element(self.license_plate_old_format_second_letters),
)
)
return self.numerify(letters + format)
def license_plate_new(self) -> str:
format = "????-##"
temp = re.sub(r"\?", lambda x: self.random_element(self.license_plate_new_format_letters), format)
return self.numerify(temp)
def license_plate_police(self) -> str:
formats = ("RP-####", "Z-####")
return self.numerify(self.random_element(formats))
def license_plate_temporary(self) -> str:
format = "PR-###"
return self.numerify(format)
def license_plate_diplomatic(self) -> str:
formats = ("CC-####", "CD-####")
return self.numerify(self.random_element(formats))
def license_plate(self) -> str:
"""Generate a license plate."""
return self.numerify(self.generator.parse(self.random_element(self.license_formats)))
| Provider |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1146139,
"end": 1150504
} | class ____(sgqlc.types.Type, Node):
"""A draft issue within a project."""
__schema__ = github_schema
__field_names__ = (
"assignees",
"body",
"body_html",
"body_text",
"created_at",
"creator",
"project_v2_items",
"projects_v2",
"title",
"updated_at",
)
assignees = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="assignees",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users to assigned to this draft issue.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The body of the draft issue."""
body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML")
"""The body of the draft issue rendered to HTML."""
body_text = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="bodyText")
"""The body of the draft issue rendered to text."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
creator = sgqlc.types.Field(Actor, graphql_name="creator")
"""The actor who created this draft issue."""
project_v2_items = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemConnection),
graphql_name="projectV2Items",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""List of items linked with the draft issue (currently draft issue
can be linked to only one item).
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
projects_v2 = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2Connection),
graphql_name="projectsV2",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Projects that link to this draft issue (currently draft issue can
be linked to only one project).
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The title of the draft issue"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| DraftIssue |
python | mlflow__mlflow | mlflow/entities/model_registry/model_version_tag.py | {
"start": 174,
"end": 933
} | class ____(_ModelRegistryEntity):
"""Tag object associated with a model version."""
def __init__(self, key, value):
self._key = key
self._value = value
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
@property
def key(self):
"""String name of the tag."""
return self._key
@property
def value(self):
"""String value of the tag."""
return self._value
@classmethod
def from_proto(cls, proto):
return cls(proto.key, proto.value)
def to_proto(self):
tag = ProtoModelVersionTag()
tag.key = self.key
tag.value = self.value
return tag
| ModelVersionTag |
python | ansible__ansible | lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/finalize.py | {
"start": 351,
"end": 472
} | class ____:
@staticmethod
def filters() -> dict[str, t.Callable]:
return dict(finalize=finalize)
| FilterModule |
python | crytic__slither | slither/utils/martin.py | {
"start": 1386,
"end": 1548
} | class ____:
"""Class to hold the information for a section of the report."""
title: str
pretty_table: MyPrettyTable
txt: str
@dataclass
| SectionInfo |
python | numpy__numpy | numpy/polynomial/tests/test_chebyshev.py | {
"start": 1186,
"end": 1496
} | class ____:
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
| TestConstants |
python | doocs__leetcode | solution/1900-1999/1900.The Earliest and Latest Rounds Where Players Compete/Solution.py | {
"start": 741,
"end": 919
} | class ____:
def earliestAndLatest(
self, n: int, firstPlayer: int, secondPlayer: int
) -> List[int]:
return dfs(firstPlayer - 1, secondPlayer - 1, n)
| Solution |
python | miyuchina__mistletoe | test/test_html_renderer.py | {
"start": 6228,
"end": 6901
} | class ____(TestCase):
def setUp(self):
self.renderer = HtmlRenderer()
self.renderer.__enter__()
self.addCleanup(self.renderer.__exit__, None, None, None)
def test_footnote_image(self):
token = Document(['![alt][foo]\n', '\n', '[foo]: bar "title"\n'])
expected = '<p><img src="bar" alt="alt" title="title" /></p>\n'
self.assertEqual(self.renderer.render(token), expected)
def test_footnote_link(self):
token = Document(['[name][foo]\n', '\n', '[foo]: target\n'])
expected = '<p><a href="target">name</a></p>\n'
self.assertEqual(self.renderer.render(token), expected)
| TestHtmlRendererFootnotes |
python | huggingface__transformers | src/transformers/models/mistral3/modeling_mistral3.py | {
"start": 5481,
"end": 7069
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Mistral3 outputs, with hidden states and attentions.
"""
)
| Mistral3CausalLMOutputWithPast |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/ansi_mapping.py | {
"start": 79,
"end": 704
} | class ____(App[None]):
def compose(self) -> ComposeResult:
ansi_colors = [
"ansi_red",
"ansi_green",
"ansi_yellow",
"ansi_blue",
"ansi_magenta",
"ansi_cyan",
"ansi_white",
"ansi_black",
]
yield Label("Foreground & background")
for color in ansi_colors:
color_name = color.partition("_")[-1]
yield Label(f"[{color}]{color_name}[/]")
yield Label(f"[dim {color}]dim {color_name}[/]")
app = AnsiMappingApp()
if __name__ == "__main__":
app.run()
| AnsiMappingApp |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_serverless_functions.py | {
"start": 8788,
"end": 28491
} | class ____(AbstractServerlessTest):
method = "post"
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_enable_node_layer(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaD",
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaD",
"Layers": ["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaD",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-layer:3",
],
"Environment": {
"Variables": {
"NODE_OPTIONS": "--import @sentry/aws-serverless/awslambda-auto",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaD",
"runtime": "nodejs10.x",
"version": 3,
"outOfDate": False,
"enabled": True,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="enable", target="lambdaD").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaD")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaD",
Layers=[
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-layer:3",
],
Environment={
"Variables": {
"NODE_OPTIONS": "--import @sentry/aws-serverless/awslambda-auto",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_enable_python_layer(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaE",
"Runtime": "python3.8",
"Handler": "lambda_handler.test_handler",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaE",
"Layers": ["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaE",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
"Environment": {
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaE",
"runtime": "python3.8",
"version": 3,
"outOfDate": False,
"enabled": True,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="enable", target="lambdaE").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaE")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaE",
Layers=[
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
Environment={
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
Handler="sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_disable_node(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaD",
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaD",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-layer:3"},
],
"Environment": {
"Variables": {
"NODE_OPTIONS": "-r @sentry/serverless/dist/awslambda-auto",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
"OTHER": "hi",
}
},
},
}
update_function_configuration_kwargs = {
"Environment": {"Variables": {"OTHER": "hi"}},
"FunctionName": "lambdaD",
"Layers": ["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaD",
"runtime": "nodejs10.x",
"version": -1,
"outOfDate": False,
"enabled": False,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="disable", target="lambdaD").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaD")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaD",
Layers=["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
Environment={"Variables": {"OTHER": "hi"}},
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_disable_python(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaF",
"Runtime": "python3.6",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaF",
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34"},
],
"Environment": {
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
"OTHER": "hi",
}
},
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaF",
"Layers": ["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
"Environment": {"Variables": {"OTHER": "hi"}},
"Handler": "lambda_handler.test_handler",
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaF",
"runtime": "python3.6",
"version": -1,
"outOfDate": False,
"enabled": False,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="disable", target="lambdaF").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaF")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaF",
Layers=["arn:aws:lambda:us-east-2:1234:layer:something-else:2"],
Environment={"Variables": {"OTHER": "hi"}},
Handler="lambda_handler.test_handler",
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_update_node_version(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaD",
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaD",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-layer:2"},
],
"Environment": {
"Variables": {
"NODE_OPTIONS": "-r @sentry/serverless/dist/awslambda-auto",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
"OTHER": "hi",
}
},
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaD",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-layer:3",
],
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaD",
"runtime": "nodejs10.x",
"version": 3,
"outOfDate": False,
"enabled": True,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="updateVersion", target="lambdaD").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaD")
mock_client.update_function_configuration.assert_called_with(
# **update_function_configuration_kwargs
FunctionName="lambdaD",
Layers=[
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-layer:3",
],
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_update_python_version(
self, mock_gen_aws_client: MagicMock, mock_get_serialized_lambda_function: MagicMock
) -> None:
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaG",
"Runtime": "python3.6",
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaG",
"Layers": [
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:something-else:2"},
{"Arn": "arn:aws:lambda:us-east-2:1234:layer:my-python-layer:2"},
],
"Environment": {
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_test.lambda_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
"OTHER": "hi",
}
},
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaG",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaG",
"runtime": "python3.8",
"version": 3,
"outOfDate": False,
"enabled": True,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="updateVersion", target="lambdaG").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaG")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaG",
Layers=[
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
)
@responses.activate
@patch.object(AwsLambdaIntegration, "get_serialized_lambda_function")
@patch("sentry.integrations.aws_lambda.integration.gen_aws_client")
def test_enable_python_layer_on_already_enabled(
self, mock_gen_aws_client, mock_get_serialized_lambda_function
):
"""
Test that ensures that if sentry-sdk is already enabled, then
re-enabling it should not override the env variables since it could be
problematic since the SENTRY_INITIAL_HANDLER env variable could be overridden
the second time with "sentry_sdk.integrations.init_serverless_sdk.
sentry_lambda_handler" and then disabling the sentry-sdk, would break
the function because the Handler will be updated with an incorrect
SENTRY_INITIAL_HANDLER value
"""
mock_client = mock_gen_aws_client.return_value
get_function_response = {
"Configuration": {
"FunctionName": "lambdaZ",
"Runtime": "python3.8",
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaZ",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
"Environment": {
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
},
}
update_function_configuration_kwargs = {
"FunctionName": "lambdaZ",
"Layers": [
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
"Environment": {
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
}
self.set_up_response_mocks(
get_function_response=get_function_response,
update_function_configuration_kwargs=update_function_configuration_kwargs,
)
mock_client.get_function = MagicMock(return_value=get_function_response)
mock_client.update_function_configuration = MagicMock()
return_value = {
"name": "lambdaZ",
"runtime": "python3.8",
"version": 3,
"outOfDate": False,
"enabled": True,
}
mock_get_serialized_lambda_function.return_value = return_value
assert self.get_response(action="enable", target="lambdaZ").data == return_value
mock_client.get_function.assert_called_with(FunctionName="lambdaZ")
mock_client.update_function_configuration.assert_called_with(
FunctionName="lambdaZ",
Layers=[
"arn:aws:lambda:us-east-2:1234:layer:something-else:2",
"arn:aws:lambda:us-east-2:1234:layer:my-python-layer:34",
],
Environment={
"Variables": {
"SENTRY_INITIAL_HANDLER": "lambda_handler.test_handler",
"SENTRY_DSN": self.sentry_dsn,
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
Handler="sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
)
| OrganizationIntegrationServerlessFunctionsPostTest |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 20315,
"end": 21545
} | class ____(RequestHandler, GoogleOAuth2Mixin):
def initialize(self, test):
self.test = test
self._OAUTH_REDIRECT_URI = test.get_url("/client/login")
self._OAUTH_AUTHORIZE_URL = test.get_url("/google/oauth2/authorize")
self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/google/oauth2/token")
@gen.coroutine
def get(self):
code = self.get_argument("code", None)
if code is not None:
# retrieve authenticate google user
access = yield self.get_authenticated_user(self._OAUTH_REDIRECT_URI, code)
user = yield self.oauth2_request(
self.test.get_url("/google/oauth2/userinfo"),
access_token=access["access_token"],
)
# return the user and access token as json
user["access_token"] = access["access_token"]
self.write(user)
else:
self.authorize_redirect(
redirect_uri=self._OAUTH_REDIRECT_URI,
client_id=self.settings["google_oauth"]["key"],
scope=["profile", "email"],
response_type="code",
extra_params={"prompt": "select_account"},
)
| GoogleLoginHandler |
python | networkx__networkx | networkx/algorithms/tests/test_cuts.py | {
"start": 3676,
"end": 4077
} | class ____:
"""Unit tests for the :func:`~networkx.edge_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.edge_expansion(G, S, T)
expected = 1 / 5
assert expected == expansion
# Test with no input T
assert expected == nx.edge_expansion(G, S)
| TestEdgeExpansion |
python | mamba-org__mamba | micromamba/tests/test_pkg_cache.py | {
"start": 2909,
"end": 9810
} | class ____:
def test_extracted_file_deleted(
self, tmp_home, tmp_cache_file_in_test_package, tmp_root_prefix
):
old_ino = tmp_cache_file_in_test_package.stat().st_ino
os.remove(tmp_cache_file_in_test_package)
env_name = "some_env"
helpers.create(package_to_check_requirements(), "-n", env_name, no_dry_run=True)
env_dir = tmp_root_prefix / "envs" / env_name
pkg_checker = helpers.PackageChecker(package_to_check, env_dir)
linked_file = pkg_checker.find_installed(file_to_find_in_package)
assert linked_file.exists()
linked_file_stats = linked_file.stat()
assert tmp_cache_file_in_test_package.stat().st_dev == linked_file_stats.st_dev
assert tmp_cache_file_in_test_package.stat().st_ino == linked_file_stats.st_ino
assert old_ino != linked_file_stats.st_ino
@pytest.mark.parametrize("safety_checks", ["disabled", "warn", "enabled"])
def test_extracted_file_corrupted(
self, tmp_home, tmp_root_prefix, tmp_cache_file_in_test_package, safety_checks
):
old_ino = tmp_cache_file_in_test_package.stat().st_ino
with open(tmp_cache_file_in_test_package, "w") as f:
f.write("//corruption")
env_name = "x1"
helpers.create(
package_to_check_requirements(),
"-n",
env_name,
"--json",
"--safety-checks",
safety_checks,
no_dry_run=True,
)
env_dir = tmp_root_prefix / "envs" / env_name
pkg_checker = helpers.PackageChecker(package_to_check, env_dir)
linked_file = pkg_checker.find_installed(file_to_find_in_package)
assert linked_file.exists()
linked_file_stats = linked_file.stat()
assert tmp_cache_file_in_test_package.stat().st_dev == linked_file_stats.st_dev
assert tmp_cache_file_in_test_package.stat().st_ino == linked_file_stats.st_ino
if safety_checks == "enabled":
assert old_ino != linked_file_stats.st_ino
else:
assert old_ino == linked_file_stats.st_ino
def test_tarball_deleted(
self,
tmp_home,
tmp_root_prefix,
tmp_cache_test_pkg,
tmp_cache_file_in_test_package,
tmp_cache,
):
assert tmp_cache_test_pkg.exists()
os.remove(tmp_cache_test_pkg)
env_name = "x1"
helpers.create(package_to_check_requirements(), "-n", env_name, "--json", no_dry_run=True)
env_dir = tmp_root_prefix / "envs" / env_name
pkg_checker = helpers.PackageChecker(package_to_check, env_dir)
linked_file = pkg_checker.find_installed(file_to_find_in_package)
assert linked_file.exists()
linked_file_stats = linked_file.stat()
assert not tmp_cache_test_pkg.exists()
assert tmp_cache_file_in_test_package.stat().st_dev == linked_file_stats.st_dev
assert tmp_cache_file_in_test_package.stat().st_ino == linked_file_stats.st_ino
def test_tarball_and_extracted_file_deleted(
self, tmp_home, tmp_root_prefix, tmp_cache_test_pkg, tmp_cache_file_in_test_package
):
test_pkg_size = tmp_cache_test_pkg.stat().st_size
old_ino = tmp_cache_file_in_test_package.stat().st_ino
os.remove(tmp_cache_file_in_test_package)
os.remove(tmp_cache_test_pkg)
env_name = "x1"
helpers.create(package_to_check_requirements(), "-n", env_name, "--json", no_dry_run=True)
env_dir = tmp_root_prefix / "envs" / env_name
pkg_checker = helpers.PackageChecker(package_to_check, env_dir)
linked_file = pkg_checker.find_installed(file_to_find_in_package)
assert linked_file.exists()
linked_file_stats = linked_file.stat()
assert tmp_cache_test_pkg.exists()
assert test_pkg_size == tmp_cache_test_pkg.stat().st_size
assert tmp_cache_file_in_test_package.stat().st_dev == linked_file_stats.st_dev
assert tmp_cache_file_in_test_package.stat().st_ino == linked_file_stats.st_ino
assert old_ino != linked_file_stats.st_ino
def test_tarball_corrupted_and_extracted_file_deleted(
self, tmp_home, tmp_root_prefix, tmp_cache_test_pkg, tmp_cache_file_in_test_package
):
test_pkg_size = tmp_cache_test_pkg.stat().st_size
old_ino = tmp_cache_file_in_test_package.stat().st_ino
os.remove(tmp_cache_file_in_test_package)
os.remove(tmp_cache_test_pkg)
with open(tmp_cache_test_pkg, "w") as f:
f.write("")
env_name = "x1"
helpers.create(package_to_check_requirements(), "-n", env_name, "--json", no_dry_run=True)
env_dir = tmp_root_prefix / "envs" / env_name
pkg_checker = helpers.PackageChecker(package_to_check, env_dir)
linked_file = pkg_checker.find_installed(file_to_find_in_package)
assert linked_file.exists()
linked_file_stats = linked_file.stat()
assert tmp_cache_test_pkg.exists()
assert test_pkg_size == tmp_cache_test_pkg.stat().st_size
assert tmp_cache_file_in_test_package.stat().st_dev == linked_file_stats.st_dev
assert tmp_cache_file_in_test_package.stat().st_ino == linked_file_stats.st_ino
assert old_ino != linked_file_stats.st_ino
@pytest.mark.parametrize("safety_checks", ("disabled", "warn", "enabled"))
def test_extracted_file_corrupted_no_perm(
self,
tmp_home,
tmp_root_prefix,
tmp_cache_test_pkg,
tmp_cache_file_in_test_package,
safety_checks,
):
with open(tmp_cache_file_in_test_package, "w") as f:
f.write("//corruption")
helpers.recursive_chmod(tmp_cache_test_pkg, 0o500)
# old_ino = tmp_cache_file_in_test_package.stat().st_ino
env = "x1"
cmd_args = (
package_to_check_requirements(),
"-n",
"--safety-checks",
safety_checks,
env,
"--json",
"-vv",
)
with pytest.raises(subprocess.CalledProcessError):
helpers.create(*cmd_args, no_dry_run=True)
@pytest.fixture
def tmp_cache_alt(tmp_root_prefix: Path, tmp_shared_cache_test_pkg: Path) -> Path:
"""Make an alternative package cache outside the root prefix."""
cache = tmp_root_prefix / "more-pkgs" # Creating under root prefix to leverage eager cleanup
shutil.copytree(tmp_shared_cache_test_pkg, cache, dirs_exist_ok=True)
return cache
def repodata_json(cache: Path) -> set[Path]:
    """Return the repodata ``*.json`` files under ``cache/cache``, excluding
    the ``*.state.json`` companion files."""
    cache_dir = cache / "cache"
    state_files = set(cache_dir.glob("*.state.json"))
    return {p for p in cache_dir.glob("*.json") if p not in state_files}
def repodata_solv(cache: Path) -> set[Path]:
    """Return the libsolv cache files (``*.solv``) under ``cache/cache``."""
    return {solv for solv in (cache / "cache").glob("*.solv")}
def same_repodata_json_solv(cache: Path):
    """Check that every repodata JSON has a matching ``.solv`` file and vice versa
    (compared by file stem)."""
    json_stems = {p.stem for p in repodata_json(cache)}
    solv_stems = {p.stem for p in repodata_solv(cache)}
    return json_stems == solv_stems
| TestPkgCache |
python | django__django | tests/queries/models.py | {
"start": 17391,
"end": 17538
class Ticket23605C(models.Model):
    """Minimal model with a single float field, used by the ticket-23605
    regression queries (named after the Django ticket)."""
    field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
| Ticket23605C |
python | ray-project__ray | python/ray/experimental/tqdm_ray.py | {
"start": 4646,
"end": 6458
class _Bar:
    """Manages a single virtual progress bar on the driver.

    The actual on-screen position of a bar is ``pos_offset + position``, where
    ``pos_offset`` is assigned by the BarManager and ``position`` comes from
    the worker-reported state.
    """

    def __init__(self, state: ProgressBarState, pos_offset: int):
        """Create the underlying tqdm bar from an initial worker state.

        Args:
            state: The initial progress bar state.
            pos_offset: The position offset determined by the BarManager.
        """
        self.state = state
        self.pos_offset = pos_offset
        self.bar = real_tqdm.tqdm(
            desc=state["desc"] + " " + str(state["pos"]),
            total=state["total"],
            unit=state["unit"],
            position=pos_offset + state["pos"],
            dynamic_ncols=True,
            unit_scale=True,
        )
        # Replay any progress already accumulated before this bar existed.
        if state["x"]:
            self.bar.update(state["x"])

    def update(self, state: ProgressBarState) -> None:
        """Apply the updated worker progress bar state."""
        previous = self.state
        if state["desc"] != previous["desc"]:
            self.bar.set_description(state["desc"])
        if state["total"] != previous["total"]:
            self.bar.total = state["total"]
            self.bar.refresh()
        advanced = state["x"] - previous["x"]
        if advanced:
            self.bar.update(advanced)
            self.bar.refresh()
        self.state = state

    def close(self):
        """The progress bar has been closed."""
        self.bar.close()

    def update_offset(self, pos_offset: int) -> None:
        """Update the position offset assigned by the BarManager."""
        if pos_offset == self.pos_offset:
            return
        self.pos_offset = pos_offset
        self.bar.clear()
        # tqdm stores screen position negated internally.
        self.bar.pos = -(pos_offset + self.state["pos"])
        self.bar.refresh()
| _Bar |
python | numba__numba | numba/tests/test_npdatetime.py | {
"start": 42736,
"end": 43680
class TestDatetimeTypeOps(TestCase):
    """``isinstance`` checks on NumPy datetime64/timedelta64 values under njit."""

    def test_isinstance_datetime(self):
        # One compiled predicate per concrete type to probe.
        @njit
        def is_complex(a):
            return isinstance(a, complex)

        @njit
        def is_datetime(a):
            return isinstance(a, np.datetime64)

        @njit
        def is_timedelta(a):
            return isinstance(a, np.timedelta64)

        dt_a = np.datetime64(1, 'ns')
        dt_b = np.datetime64(2, 'ns')
        td_c = dt_b - dt_a

        def check(jit_func, x):
            # The compiled result must agree with the pure-Python result.
            with self.subTest(f'{jit_func.__name__}({type(x).__name__})'):
                self.assertEqual(jit_func(x), jit_func.py_func(x))

        predicates = (is_complex, is_datetime, is_timedelta)
        values = (dt_a, dt_b, td_c)
        for predicate, value in itertools.product(predicates, values):
            check(predicate, value)
| TestDatetimeTypeOps |
python | hynek__structlog | tests/test_testing.py | {
"start": 5587,
"end": 6584
class TestCapturingLogger:
    """Behavior of the CapturingLogger / CapturingLoggerFactory test helpers."""

    def test_factory_caches(self):
        """
        CapturingLoggerFactory returns one CapturingLogger over and over again.
        """
        factory = CapturingLoggerFactory()
        assert factory() is factory()

    def test_repr(self):
        """
        repr says how many calls there were.
        """
        cl = CapturingLogger()
        cl.info("hi")
        cl.error("yolo")
        assert repr(cl) == "<CapturingLogger with 2 call(s)>"

    def test_captures(self):
        """
        All calls to all names are captured.
        """
        cl = CapturingLogger()
        cl.info("hi", val=42)
        cl.trololo("yolo", foo={"bar": "baz"})
        expected = [
            CapturedCall(method_name="info", args=("hi",), kwargs={"val": 42}),
            CapturedCall(
                method_name="trololo",
                args=("yolo",),
                kwargs={"foo": {"bar": "baz"}},
            ),
        ]
        assert cl.calls == expected
| TestCapturingLogger |
python | astropy__astropy | astropy/convolution/utils.py | {
"start": 250,
"end": 338
class KernelError(Exception):
    """
    Base error class for kernel errors.

    Subclass this for more specific kernel-related failures so callers can
    catch all of them with one ``except KernelError`` clause.
    """
| KernelError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 32225,
"end": 35010
class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
    """Tests for the ``convolutional_delta_orthogonal`` initializer."""

    @test_util.run_deprecated_v1
    def testInitializerIdentical(self):
        # Same seed must reproduce the same kernel values.
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))

    @test_util.run_deprecated_v1
    def testInitializerDifferent(self):
        # Different seeds must give different kernels.
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype)
            self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))

    @test_util.run_deprecated_v1
    def testDuplicatedInitializer(self):
        # Reusing one initializer object must not duplicate values.
        init = init_ops.convolutional_delta_orthogonal()
        self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))

    def testInvalidDataType(self):
        # String dtype is rejected at construction time.
        self.assertRaises(
            ValueError,
            init_ops.convolutional_delta_orthogonal,
            dtype=dtypes.string)

    def testInvalidShape(self):
        # fan-in (6) > fan-out (5) is invalid for a delta-orthogonal kernel.
        init1 = init_ops.convolutional_delta_orthogonal()
        with self.session(graph=ops.Graph(), use_gpu=True):
            self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])

    @test_util.run_deprecated_v1
    def testGain(self):
        # A gain of g must scale the kernel by exactly g for a fixed seed.
        shape = (3, 3, 10, 10)
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(
                gain=3.14, seed=1, dtype=dtype)
            with self.session(graph=ops.Graph(), use_gpu=True):
                t1 = init1(shape).eval()
                t2 = init2(shape).eval()
                self.assertAllClose(t1, t2 / 3.14)

    @test_util.run_deprecated_v1
    def testNonuniformity(self):
        # The center slice of each kernel should be orthogonal (|det| == 1)
        # with randomly varying sign across draws.
        value = 0
        abs_value = 0
        shape = [3, 3, 10, 10]
        count = 70
        tol = 1e-5
        with self.session():
            for i in range(count):
                x = variable_scope.get_variable(
                    "{}".format(i),
                    shape=shape,
                    initializer=init_ops.convolutional_delta_orthogonal)
                self.evaluate(x.initializer)
                y = self.evaluate(x)[1, 1, :, :]
                determinant = np.linalg.det(y)
                value += determinant
                abs_value += np.abs(determinant)
            # Check there is some variation in the signs of the determinants
            self.assertLess(value, count - tol)
            self.assertLess(-count + tol, value)
            # Check all determinants have absolute value 1
            # Compute the sum of the absolute values of 'count' determinants
            self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
@test_util.run_all_without_tensor_float_32(
"Tests convolutional_orthogonal_1d, which calls matmul")
| ConvolutionDeltaOrthogonalInitializerTest |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/cuda_kernel.py | {
"start": 6507,
"end": 20195
class CUDATemplateKernel(CUDAKernel):
    """
    Template kernels defined by CUDA / Cutlass in C++.
    """

    # Trailing C++ parameters appended to every generated kernel signature.
    _EXTRA_CPP_ARGS = "size_t* workspace_size, uint8_t* workspace, cudaStream_t stream"

    def __init__(
        self,
        kernel_name: str,
        runtime_arg_info: list["ArgInfo"],
        runtime_arg_values: list[Any],
    ) -> None:
        """
        Initializes a new instance of the CUDATemplateKernel class.

        Args:
            kernel_name (str): The name of the kernel.
            runtime_arg_info: Declarations (name/type) of extra runtime-only
                arguments appended to the generated signature.
            runtime_arg_values: Values matching ``runtime_arg_info``, appended
                to every generated call.
        """
        super().__init__()
        self.kernel_name = kernel_name
        self.runtime_arg_info = runtime_arg_info
        self.runtime_arg_values = runtime_arg_values

    def check_not_null(self, node: IRNode) -> str:
        """
        Generates code to check that a node is not null.

        Returns an empty string when the node (or its argument name) is
        unavailable; otherwise a C++ snippet that throws if the pointer is
        null while the flattened size is non-zero.
        """
        if node is None:
            return ""
        size_str = self.size(node, 0, -1)
        name_str = self.arg_name(node)
        if name_str is None:
            return ""
        res = IndentedBuffer(initial_indent=2)
        res.tabwidth = 1
        res.splice(
            f"""
            {{
              if (!{name_str}) {{
                int64_t {name_str}_size = {size_str};
                if ({name_str}_size > 0) {{
                  throw std::runtime_error("input {name_str} is null but size is not 0!");
                }}
              }}
            }}
            """
        )
        return res.getvalue()

    def get_signature(self) -> str:
        """Return the C++ signature produced by the last ``def_kernel`` call."""
        return self.signature

    def def_kernel(
        self,
        inputs: list[IRNode],
        outputs: list[IRNode],
        names_str: str = "",
        input_reorder: Optional[list[int]] = None,
    ) -> str:
        """
        Hook called from template code to generate function definition and
        needed args.

        Args:
            inputs: List of input IRNodes
            outputs: List of output IRNodes
            names_str: Comma separated list of input + output argument names.
            input_reorder: The actual order of input nodes.
                e.g. The template might have input argument defined as [X, W, Bias],
                and the actual input passed into this template could be [Bias, X, W].
                In this case, the `input_reorder` would be [2, 0, 1].
        """
        # NB: name order matters here, it's used to match up offsets
        names = [x.strip() for x in names_str.strip().split(",")]
        if len(inputs) + len(outputs) != len(names):
            raise RuntimeError(
                f"{len(inputs) + len(outputs)=} != {len(names)=}, {inputs=}, {outputs=}, {names=}"
            )
        if input_reorder is not None:
            assert len(inputs) == len(input_reorder)
        else:
            input_reorder = list(range(len(inputs)))
        # Register inputs in their actual (reordered) positions.
        for idx in input_reorder:
            name = names[idx]
            node = inputs[idx]
            if node is not None:
                self.named_nodes[name] = node
                self.args.input_buffers[node.get_name()] = name
        # Collect free symbols from output shapes/strides, except for the
        # canonical GEMM operands whose symbolic shapes are handled explicitly.
        free_symbols: OrderedSet[Expr] = OrderedSet()
        for name, node in zip(names[len(inputs) : len(inputs) + len(outputs)], outputs):
            if node is not None:
                # NB: named nodes must be populated in the order of names
                self.named_nodes[name] = node
                self.args.output_buffers[node.get_name()] = name
                if name not in (
                    "X",
                    "W",
                    "Bias",
                    "Y",
                ):  # we handle these symbolic shapes explicitly
                    for expr in itertools.chain(node.get_size(), node.get_stride()):
                        if isinstance(expr, Expr):
                            for s in expr.free_symbols:
                                free_symbols.add(s)  # type: ignore[arg-type]
        arg_defs, *_ = self.args.cpp_argdefs(DTYPE_TO_CUTLASS_TYPE)
        self.init_layout_args()
        # Fixed GEMM size/stride parameters, then any remaining free symbols.
        size_vars = ["M", "N", "K", "B", "lda", "ldb", "ldc", "ldd"]
        size_vars.extend(str(s) for s in free_symbols)
        self.size_args.extend(free_symbols)
        size_args = [f"const int {s}" for s in size_vars]
        # One offset argument per named node, applied in ptr().
        offset_args = [f"const int {name}_offset" for name in self.named_nodes]
        runtime_arg_decls = ",".join(
            [f"{arg.ty} {arg.name}" for arg in self.runtime_arg_info]
        )
        if runtime_arg_decls:
            runtime_arg_decls += ", "
        signature = (
            f"int {self.kernel_name}({', '.join(arg_defs + size_args + offset_args)},\
 {runtime_arg_decls}{self._EXTRA_CPP_ARGS})"
        )
        self.signature = signature
        return signature

    def call_kernel(
        self,
        name: str,
        node: "CUDATemplateBuffer",  # type: ignore[name-defined]
    ) -> None:
        """
        Generates code to call the kernel through V.graph.wrapper_code.
        used from within torch._inductor.wrapper.PythonWrapperCodegen

        name: Name of kernel function.
        node: The CUDATemplateBuffer node which contains information about the kernel, it's fused epilogue nodes
        as well as all required inputs and outputs.
        """
        wrapper = V.graph.wrapper_code
        arg_types: list[Any]
        if V.graph.cpp_wrapper:
            # Make sure we initialize these kernels since they're exported as
            # C-style symbol names.
            assert isinstance(wrapper, CppWrapperCpu)
            wrapper.initialized_kernels[name] = self
            # We always originally initialize name with "KERNEL_NAME". So, we
            # we replace with the real kernel name passed as an arg to this function.
            self.signature = self.signature.replace(str(Placeholder.KERNEL_NAME), name)
            _, call_args, arg_types = self.args.cpp_argdefs(DTYPE_TO_CUTLASS_TYPE)
        else:
            _, call_args, _, arg_types = self.args.python_argdefs()
        dynamic_shape_args = self.get_dynamic_shape_args()
        offset_args = self.get_offset_args()
        call_args.extend(dynamic_shape_args)  # type: ignore[arg-type]
        call_args.extend(offset_args)  # type: ignore[arg-type]
        for arg in self.runtime_arg_values:
            call_args.append(str(arg))
        arg_types.extend("const int" for _ in dynamic_shape_args)
        arg_types.extend("const int" for _ in offset_args)
        for arg in self.runtime_arg_info:
            arg_types.append(arg.ty)
        # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
        for i in range(len(call_args)):
            if V.graph.is_unspec_arg(call_args[i]):
                call_args[i] = call_args[i] + ".item()"
            elif isinstance(arg_types[i], torch_dtype):
                call_args[i] = (
                    call_args[i]
                    if V.graph.cpp_wrapper
                    else f"c_void_p({call_args[i]}.data_ptr())"
                )
        # workspace_size ptr is NULL to mark this call is not intended for retrieving workspace_size.
        # workspace_size should have already been retrieved prior to this call.
        # workspace_size is here.
        call_args.append("nullptr" if V.graph.cpp_wrapper else "None")
        if V.graph.cpp_wrapper:
            arg_types.append("size_t*")
        if node.get_workspace_size() > 0:
            # Allocate (and later free) a scratch workspace for the kernel.
            ws = WorkspaceArg(
                count=node.get_workspace_size(),
                device=V.graph.get_current_device_or_throw(),
                zero_mode=WorkspaceZeroMode.UNINITIALIZED,
                outer_name=WorkspaceArg.unique_name(),
            )
            wrapper.generate_workspace_allocation(ws)
            workspace = str(ws.outer_name)
            call_args.append(
                workspace
                if V.graph.cpp_wrapper
                else f"c_void_p({workspace}.data_ptr())"
            )
        else:
            ws = None
            call_args.append("nullptr" if V.graph.cpp_wrapper else "None")
        if V.graph.cpp_wrapper:
            arg_types.append("uint8_t*")
        wrapper.generate_kernel_call(
            name,
            call_args,
            triton=False,
            arg_types=arg_types,
        )
        if ws:
            wrapper.generate_workspace_deallocation(ws)

    def dtype(self, node: IRNode) -> Optional[str]:
        """
        Generates code which represents dtype of a given node.
        """
        if node is None:
            return "void"
        return DTYPE_TO_CPP.get(node.get_layout().dtype)

    def cutlass_dtype(self, node: IRNode, default_dtype="void") -> Optional[str]:
        # Helper method, called into from CUTLASSGemmTemplate
        if node is None:
            return default_dtype
        from torch._inductor.codegen.cuda.cuda_template import CUTLASSTemplate

        return CUTLASSTemplate._DTYPE_TO_CUTLASS[node.get_layout().dtype]

    def max_valid_index(self, node: IRNode, default=-1):
        # Helper method, called into from CUTLASSGemmTemplate
        # Largest flat element offset reachable through the node's layout.
        if node is None:
            return default
        max_valid_offset = 0
        for i in range(len(node.get_size())):
            max_valid_offset += (node.get_size()[i] - 1) * node.get_stride()[i]
        return max_valid_offset

    def ptr(self, node: IRNode) -> str:
        """
        Generates code which represents pointer of a given node.
        """
        if node is None:
            return "nullptr"
        arg_name = self.arg_name(node)
        if arg_name is None:
            return "nullptr"
        # The per-argument offset is a separate kernel parameter (see def_kernel).
        return f"{arg_name} + {arg_name}_offset"

    def size(
        self,
        node: IRNode,
        start_index: int,
        end_index: Optional[int] = None,
        default_value: int = 0,
    ) -> str:
        """
        Hook called from template code to get the size of an arg.
        Generates code which represents size of a given node in [start_index, end_index).
        If node is None, returns default_value.

        TODO: Will add needed args to pass it in if it is dynamic.
        """
        # NOTE(review): annotated ``-> str`` but returns a sympy expression
        # when any dimension is symbolic — callers appear to stringify it;
        # confirm before tightening the annotation.
        if node is None:
            return str(default_value)
        start_index = _normalize_idx(start_index, len(node.get_size()))
        if end_index is None:
            end_index = start_index
        end_index = _normalize_idx(end_index, len(node.get_size()))
        # Prefer registered symbols over concrete sizes so dynamic shapes work.
        sizes = [
            self.find_symbol(node, "size", dim=i) or node.get_size()[i]
            for i in range(start_index, end_index + 1)
        ]
        if len(sizes) == 0:
            return str(default_value)
        sizes = [symbols(v) if isinstance(v, str) else v for v in sizes]
        val = sympy_product(sizes)
        return val

    def stride(self, node: IRNode, index: int, default_value: int = 0) -> str:
        """
        Hook called from template code to get the stride of an arg.
        Generates code which represents stride of a given node at index.
        If node is None, returns default_value.

        TODO: Will add needed args to pass it in if it is dynamic.
        """
        if node is None:
            return str(default_value)
        index = _normalize_idx(index, len(node.get_size()))
        if index < 0:
            return str(default_value)
        stride = node.get_stride()[index]
        # Statically-known contiguous strides need no symbol.
        if V.graph.sizevars.statically_known_leq(stride, 1):
            return str(stride)
        return self.find_symbol(node, "stride", dim=index) or str(stride)

    def batch_stride(self, node: IRNode, default_value: int = 0) -> str:
        """
        Hook called from template code to get the batch stride of an arg.
        Returns 0 if batch dim is not present.

        This method assumes that batch stride is the largest stride.
        """
        if node is None:
            return str(default_value)
        if len(node.get_size()) < 3:
            # No batch dimension for rank-2 (or lower) tensors.
            return str(default_value)
        batch_stride = node.get_stride()[0]
        if V.graph.sizevars.statically_known_leq(batch_stride, 1):
            return str(batch_stride)
        # Express the batch stride as rows * cols of one batch element.
        return "{}*{}".format(
            self.find_symbol(node, "size", dim=1) or node.get_size()[1],
            self.find_symbol(node, "size", dim=2) or node.get_size()[2],
        )

    def row_or_column_stride(self, node: IRNode, default_value: int = 0) -> str:
        """
        Hook called from template code to get the row or column stride of an arg.
        This is required by some CUTLASS 2.X APIs.
        If the node is in row_major, it returns stride[-2].
        If the node is in column_major, it returns stride[-1].

        TODO: Will add needed args to pass it in if it is dynamic.
        """
        if node is None or len(node.get_stride()) < 2:
            return str(default_value)
        stride0 = node.get_stride()[-1]
        stride1 = node.get_stride()[-2]
        if stride0 == 1:
            return cexpr(self.rename_indexing(stride1))
        elif stride1 == 1:
            return cexpr(self.rename_indexing(stride0))
        else:
            raise RuntimeError(
                f"At least 1 stride should be 1. Strides: {node.get_stride()=}"
            )

    def load(self, name: str, index: Expr, mode: Any = None) -> CSEVariable:
        """
        Mock load function for memory planning to optimize allocations properly.
        """
        return self.create_cse_var(name, bounds=ValueRanges.unknown())

    def store(self, name: str, index: Expr, value: Any, mode: Any = None) -> None:
        """
        Mock store function for memory planning to optimize allocations properly.
        """
        self.store_buffer_names.add(name)
| CUDATemplateKernel |
python | pypa__virtualenv | src/virtualenv/activation/cshell/__init__.py | {
"start": 106,
"end": 336
class CShellActivator(ViaTemplateActivator):
    """Activator that renders an ``activate.csh`` script for csh/tcsh shells."""

    @classmethod
    def supports(cls, interpreter):
        # csh activation is not applicable on Windows ("nt").
        return interpreter.os != "nt"

    def templates(self):
        yield "activate.csh"


__all__ = [
    "CShellActivator",
]
| CShellActivator |
python | matplotlib__matplotlib | lib/matplotlib/testing/compare.py | {
"start": 8399,
"end": 20122
class _SVGWithMatplotlibFontsConverter(_SVGConverter):
    """
    A SVG converter which explicitly adds the fonts shipped by Matplotlib to
    Inkscape's font search path, to better support `svg.fonttype = "none"`
    (which is in particular used by certain mathtext tests).
    """
    # Fixed docstring typo: "Inkspace" -> "Inkscape".

    def __call__(self, orig, dest):
        # Lazily stage Matplotlib's bundled ttf fonts into a temp dir on first
        # use; the TemporaryDirectory is kept on self so it lives (and is
        # cleaned up) with this converter instance.
        if not hasattr(self, "_tmpdir"):
            self._tmpdir = TemporaryDirectory()
            shutil.copytree(cbook._get_data_path("fonts/ttf"),
                            Path(self._tmpdir.name, "fonts"))
        return super().__call__(orig, dest)
def _update_converter():
    """Register the converters whose external tools exist on this system."""

    def _tool_available(executable):
        # mpl._get_executable_info raises if the tool is missing or unusable.
        try:
            mpl._get_executable_info(executable)
        except mpl.ExecutableNotFoundError:
            return False
        return True

    if _tool_available("magick"):
        converter['gif'] = _MagickConverter()
    if _tool_available("gs"):
        converter['pdf'] = converter['eps'] = _GSConverter()
    if _tool_available("inkscape"):
        converter['svg'] = _SVGConverter()
#: A dictionary that maps filename extensions to functions which themselves
#: convert between arguments `old` and `new` (filenames).
converter = {}
# Populate the table once at import time based on available external tools.
_update_converter()
# Shared converter instance used by convert() for SVGs that embed font
# styling (svg.fonttype = "none") and so need Matplotlib's bundled fonts.
_svg_with_matplotlib_fonts_converter = _SVGWithMatplotlibFontsConverter()
def comparable_formats():
    """
    Return the list of file formats that `.compare_images` can compare
    on this system.

    Returns
    -------
    list of str
        E.g. ``['png', 'pdf', 'svg', 'eps']``.
    """
    # png needs no conversion; every other format requires a registered converter.
    return ['png'] + list(converter)
def convert(filename, cache):
    """
    Convert the named file to png; return the name of the created file.

    If *cache* is True, the result of the conversion is cached in
    `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a
    hash of the exact contents of the input file. Old cache entries are
    automatically deleted as needed to keep the size of the cache capped to
    twice the size of all baseline images.
    """
    path = Path(filename)
    if not path.exists():
        raise OSError(f"{path} does not exist")
    if path.suffix[1:] not in converter:
        import pytest
        pytest.skip(f"Don't know how to convert {path.suffix} files to png")
    newpath = path.parent / f"{path.stem}_{path.suffix[1:]}.png"
    # Only convert the file if the destination doesn't already exist or
    # is out of date.
    if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime:
        cache_dir = _get_cache_path() if cache else None
        if cache_dir is not None:
            _register_conversion_cache_cleaner_once()
            hash_value = get_file_hash(path)
            cached_path = cache_dir / (hash_value + newpath.suffix)
            if cached_path.exists():
                _log.debug("For %s: reusing cached conversion.", filename)
                shutil.copyfile(cached_path, newpath)
                return str(newpath)
        _log.debug("For %s: converting to png.", filename)
        # NOTE(review): this local deliberately shadows the module-level
        # convert() — it holds the per-format converter callable.
        convert = converter[path.suffix[1:]]
        if path.suffix == ".svg":
            contents = path.read_text(encoding="utf-8")
            # NOTE: This check should be kept in sync with font styling in
            # `lib/matplotlib/backends/backend_svg.py`. If it changes, then be sure to
            # re-generate any SVG test files using this mode, or else such tests will
            # fail to use the converter for the expected images (but will for the
            # results), and the tests will fail strangely.
            if re.search(
                    # searches for attributes :
                    # style=[font|font-size|font-weight|
                    #        font-family|font-variant|font-style]
                    # taking care of the possibility of multiple style attributes
                    # before the font styling (i.e. opacity)
                    r'style="[^"]*font(|-size|-weight|-family|-variant|-style):',
                    contents  # raw contents of the svg file
            ):
                # for svg.fonttype = none, we explicitly patch the font search
                # path so that fonts shipped by Matplotlib are found.
                convert = _svg_with_matplotlib_fonts_converter
        convert(path, newpath)
        if cache_dir is not None:
            _log.debug("For %s: caching conversion result.", filename)
            shutil.copyfile(newpath, cached_path)
    return str(newpath)
def _clean_conversion_cache():
    """Evict least-recently-used conversion cache entries until the cache fits
    within twice the total size of the baseline images."""
    # This will actually ignore mpl_toolkits baseline images, but they're
    # relatively small.
    baseline_images_size = sum(
        path.stat().st_size
        for path in Path(mpl.__file__).parent.glob("**/baseline_images/**/*"))
    # 2x: one full copy of baselines, and one full copy of test results
    # (actually an overestimate: we don't convert png baselines and results).
    max_cache_size = 2 * baseline_images_size
    # Reduce cache until it fits.
    with cbook._lock_path(_get_cache_path()):
        cache_stat = {
            path: path.stat() for path in _get_cache_path().glob("*")}
        cache_size = sum(stat.st_size for stat in cache_stat.values())
        paths_by_atime = sorted(  # Oldest at the end.
            cache_stat, key=lambda path: cache_stat[path].st_atime,
            reverse=True)
        while cache_size > max_cache_size:
            path = paths_by_atime.pop()
            cache_size -= cache_stat[path].st_size
            path.unlink()
@functools.cache  # Ensure this is only registered once.
def _register_conversion_cache_cleaner_once():
    """Register the conversion-cache cleaner to run at interpreter exit."""
    atexit.register(_clean_conversion_cache)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
    """Center-crop *actual_image* to *expected_image*'s size when comparing an
    eps-derived png against a pdf-derived png (their canvas sizes differ)."""
    # Converted files are named "<name>_<ext>.png", so the original extension
    # sits at characters [-7:-4] of the path.
    if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
        rows, cols, _ = actual_image.shape
        target_rows, target_cols, _ = expected_image.shape
        row0 = int(rows / 2 - target_rows / 2)
        row1 = int(rows / 2 + target_rows / 2)
        col0 = int(cols / 2 - target_cols / 2)
        col1 = int(cols / 2 + target_cols / 2)
        actual_image = actual_image[row0:row1, col0:col1]
    return actual_image, expected_image
def calculate_rms(expected_image, actual_image):
    """
    Calculate the per-pixel errors, then compute the root mean square error.
    """
    if expected_image.shape != actual_image.shape:
        raise ImageComparisonFailure(
            f"Image sizes do not match expected size: {expected_image.shape} "
            f"actual size {actual_image.shape}")
    # Cast the differences to float so squaring cannot overflow integer types.
    # NOTE(review): the subtraction itself still runs in the input dtype —
    # unsigned inputs could wrap; confirm callers pass signed/float arrays.
    per_pixel_error = (expected_image - actual_image).astype(float)
    return np.sqrt(np.mean(per_pixel_error ** 2))
# NOTE: compare_image and save_diff_image assume that the image does not have
# 16-bit depth, as Pillow converts these to RGB incorrectly.
def _load_image(path):
    """Load *path* with Pillow and return it as an ndarray, dropping a fully
    opaque alpha channel so RGB and opaque-RGBA images can compare equal."""
    img = Image.open(path)
    if img.mode != "RGBA":
        img = img.convert("RGB")
    else:
        # An RGBA image whose alpha channel's minimum is 255 is fully opaque;
        # discard the alpha channel in that case.
        if img.getextrema()[3][0] == 255:
            img = img.convert("RGB")
    return np.asarray(img)
def compare_images(expected, actual, tol, in_decorator=False):
    """
    Compare two "image" files checking differences within a tolerance.

    The two given filenames may point to files which are convertible to
    PNG via the `!converter` dictionary. The underlying RMS is calculated
    in a similar way to the `.calculate_rms` function.

    Parameters
    ----------
    expected : str
        The filename of the expected image.
    actual : str
        The filename of the actual image.
    tol : float
        The tolerance (a color value difference, where 255 is the
        maximal difference). The test fails if the average pixel
        difference is greater than this value.
    in_decorator : bool
        Determines the output format. If called from image_comparison
        decorator, this should be True. (default=False)

    Returns
    -------
    None or dict or str
        Return *None* if the images are equal within the given tolerance.

        If the images differ, the return value depends on *in_decorator*.
        If *in_decorator* is true, a dict with the following entries is
        returned:

        - *rms*: The RMS of the image difference.
        - *expected*: The filename of the expected image.
        - *actual*: The filename of the actual image.
        - *diff_image*: The filename of the difference image.
        - *tol*: The comparison tolerance.

        Otherwise, a human-readable multi-line string representation of this
        information is returned.

    Examples
    --------
    ::

        img1 = "./baseline/plot.png"
        img2 = "./output/plot.png"
        compare_images(img1, img2, 0.001)
    """
    actual = os.fspath(actual)
    if not os.path.exists(actual):
        raise Exception(f"Output image {actual} does not exist.")
    if os.stat(actual).st_size == 0:
        raise Exception(f"Output image file {actual} is empty.")
    # Convert the image to png
    expected = os.fspath(expected)
    if not os.path.exists(expected):
        raise OSError(f'Baseline image {expected!r} does not exist.')
    extension = expected.split('.')[-1]
    if extension != 'png':
        # Both sides are converted so the pixel comparison is png-vs-png.
        actual = convert(actual, cache=True)
        expected = convert(expected, cache=True)
    # open the image files
    expected_image = _load_image(expected)
    actual_image = _load_image(actual)
    actual_image, expected_image = crop_to_same(
        actual, actual_image, expected, expected_image)
    diff_image = make_test_filename(actual, 'failed-diff')
    if tol <= 0:
        # Zero tolerance: exact array equality short-circuits the RMS work.
        if np.array_equal(expected_image, actual_image):
            return None
    rms, abs_diff = _image.calculate_rms_and_diff(expected_image, actual_image)
    if rms <= tol:
        return None
    # Comparison failed: save the difference image next to the result.
    Image.fromarray(abs_diff).save(diff_image, format="png")
    results = dict(rms=rms, expected=str(expected),
                   actual=str(actual), diff=str(diff_image), tol=tol)
    if not in_decorator:
        # Then the results should be a string suitable for stdout.
        template = ['Error: Image files did not match.',
                    'RMS Value: {rms}',
                    'Expected: \n {expected}',
                    'Actual: \n {actual}',
                    'Difference:\n {diff}',
                    'Tolerance: \n {tol}', ]
        results = '\n '.join([line.format(**results) for line in template])
    return results
def save_diff_image(expected, actual, output):
    """
    Save an amplified absolute-difference image of *expected* vs *actual*.

    Parameters
    ----------
    expected : str
        File path of expected image.
    actual : str
        File path of actual image.
    output : str
        File path to save difference image to.

    Raises
    ------
    ImageComparisonFailure
        If the two images (after eps/pdf cropping) differ in shape.
    """
    expected_image = _load_image(expected)
    actual_image = _load_image(actual)
    actual_image, expected_image = crop_to_same(
        actual, actual_image, expected, expected_image)
    expected_image = np.array(expected_image, float)
    actual_image = np.array(actual_image, float)
    if expected_image.shape != actual_image.shape:
        raise ImageComparisonFailure(
            f"Image sizes do not match expected size: {expected_image.shape} "
            f"actual size {actual_image.shape}")
    abs_diff = np.abs(expected_image - actual_image)
    # expand differences in luminance domain
    abs_diff *= 10
    abs_diff = np.clip(abs_diff, 0, 255).astype(np.uint8)
    if abs_diff.shape[2] == 4:  # Hard-code the alpha channel to fully solid
        abs_diff[:, :, 3] = 255
    Image.fromarray(abs_diff).save(output, format="png")
| _SVGWithMatplotlibFontsConverter |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/tools/_beta_functions.py | {
"start": 1427,
"end": 1812
class BetaAsyncBuiltinFunctionTool(ABC):
    """Abstract base for async builtin function tools.

    Concrete tools supply their wire representation via ``to_dict`` and an
    async ``call`` implementation; ``name`` is derived from that dict.
    """

    @abstractmethod
    def to_dict(self) -> BetaToolUnionParam: ...

    @abstractmethod
    async def call(self, input: object) -> BetaFunctionToolResultType: ...

    @property
    def name(self) -> str:
        # MCP tools are identified by their server name; all other tools by
        # the plain "name" key.
        params = self.to_dict()
        if "mcp_server_name" in params:
            return params["mcp_server_name"]
        return params["name"]
| BetaAsyncBuiltinFunctionTool |
python | pypa__pip | tests/unit/test_link.py | {
"start": 160,
"end": 8541
class TestLink:
    """Unit tests for the Link model (parsing, fragments, hashes, VCS)."""

    @pytest.mark.parametrize(
        "url, expected",
        [
            (
                "https://user:password@example.com/path/page.html",
                "<Link https://user:****@example.com/path/page.html>",
            ),
        ],
    )
    def test_repr(self, url: str, expected: str) -> None:
        # Credentials must be redacted in the repr.
        link = Link(url)
        assert repr(link) == expected

    @pytest.mark.parametrize(
        "url, expected",
        [
            ("http://yo/wheel.whl", "wheel.whl"),
            ("http://yo/wheel", "wheel"),
            ("https://example.com/path/page.html", "page.html"),
            # Test a quoted character.
            ("https://example.com/path/page%231.html", "page#1.html"),
            (
                "http://yo/myproject-1.0%2Bfoobar.0-py2.py3-none-any.whl",
                "myproject-1.0+foobar.0-py2.py3-none-any.whl",
            ),
            # Test a path that ends in a slash.
            ("https://example.com/path/", "path"),
            ("https://example.com/path//", "path"),
            # Test a url with no filename.
            ("https://example.com/", "example.com"),
            # Test a url with no filename and with auth information.
            (
                "https://user:password@example.com/",
                "example.com",
            ),
        ],
    )
    def test_filename(self, url: str, expected: str) -> None:
        link = Link(url)
        assert link.filename == expected

    def test_splitext(self) -> None:
        assert ("wheel", ".whl") == Link("http://yo/wheel.whl").splitext()

    def test_no_ext(self) -> None:
        assert "" == Link("http://yo/wheel").ext

    def test_ext(self) -> None:
        assert ".whl" == Link("http://yo/wheel.whl").ext

    def test_ext_fragment(self) -> None:
        # The fragment is not part of the extension.
        assert ".whl" == Link("http://yo/wheel.whl#frag").ext

    def test_ext_query(self) -> None:
        # The query string is not part of the extension.
        assert ".whl" == Link("http://yo/wheel.whl?a=b").ext

    def test_is_wheel(self) -> None:
        assert Link("http://yo/wheel.whl").is_wheel

    def test_is_wheel_false(self) -> None:
        assert not Link("http://yo/not_a_wheel").is_wheel

    def test_fragments(self) -> None:
        # egg= and subdirectory= fragments are parsed in either order.
        url = "git+https://example.com/package#egg=eggname"
        assert "eggname" == Link(url).egg_fragment
        assert None is Link(url).subdirectory_fragment
        url = "git+https://example.com/package#egg=eggname&subdirectory=subdir"
        assert "eggname" == Link(url).egg_fragment
        assert "subdir" == Link(url).subdirectory_fragment
        url = "git+https://example.com/package#subdirectory=subdir&egg=eggname"
        assert "eggname" == Link(url).egg_fragment
        assert "subdir" == Link(url).subdirectory_fragment
        # Extras are supported and preserved in the egg fragment,
        # even the empty extras specifier.
        # This behavior is deprecated and will change in pip 25.
        url = "git+https://example.com/package#egg=eggname[extra]"
        assert "eggname[extra]" == Link(url).egg_fragment
        assert None is Link(url).subdirectory_fragment
        url = "git+https://example.com/package#egg=eggname[extra1,extra2]"
        assert "eggname[extra1,extra2]" == Link(url).egg_fragment
        assert None is Link(url).subdirectory_fragment
        url = "git+https://example.com/package#egg=eggname[]"
        assert "eggname[]" == Link(url).egg_fragment
        assert None is Link(url).subdirectory_fragment

    @pytest.mark.xfail(reason="Behavior change scheduled for 25.0", strict=True)
    @pytest.mark.parametrize(
        "fragment",
        [
            # Package names in egg fragments must be in PEP 508 form.
            "~invalid~package~name~",
            # Version specifiers are not valid in egg fragments.
            "eggname==1.2.3",
            "eggname>=1.2.3",
            # The extras specifier must be in PEP 508 form.
            "eggname[!]",
        ],
    )
    def test_invalid_egg_fragments(self, fragment: str) -> None:
        url = f"git+https://example.com/package#egg={fragment}"
        with pytest.raises(ValueError):
            Link(url)

    @pytest.mark.parametrize(
        "yanked_reason, expected",
        [
            (None, False),
            # An empty reason still marks the link as yanked.
            ("", True),
            ("there was a mistake", True),
        ],
    )
    def test_is_yanked(self, yanked_reason: str | None, expected: bool) -> None:
        link = Link(
            "https://example.com/wheel.whl",
            yanked_reason=yanked_reason,
        )
        assert link.is_yanked == expected

    @pytest.mark.parametrize(
        "hash_name, hex_digest, expected",
        [
            # Test a value that matches but with the wrong hash_name.
            ("sha384", 128 * "a", False),
            # Test matching values, including values other than the first.
            ("sha512", 128 * "a", True),
            ("sha512", 128 * "b", True),
            # Test a matching hash_name with a value that doesn't match.
            ("sha512", 128 * "c", False),
            # Test a link without a hash value.
            ("sha512", "", False),
        ],
    )
    def test_is_hash_allowed(
        self, hash_name: str, hex_digest: str, expected: bool
    ) -> None:
        url = f"https://example.com/wheel.whl#{hash_name}={hex_digest}"
        link = Link(url)
        hashes_data = {
            "sha512": [128 * "a", 128 * "b"],
        }
        hashes = Hashes(hashes_data)
        assert link.is_hash_allowed(hashes) == expected

    def test_is_hash_allowed__no_hash(self) -> None:
        link = Link("https://example.com/wheel.whl")
        hashes_data = {
            "sha512": [128 * "a"],
        }
        hashes = Hashes(hashes_data)
        assert not link.is_hash_allowed(hashes)

    @pytest.mark.parametrize(
        "hashes, expected",
        [
            (None, False),
            # Also test a success case to show the test is correct.
            (Hashes({"sha512": [128 * "a"]}), True),
        ],
    )
    def test_is_hash_allowed__none_hashes(
        self, hashes: Hashes | None, expected: bool
    ) -> None:
        url = "https://example.com/wheel.whl#sha512={}".format(128 * "a")
        link = Link(url)
        assert link.is_hash_allowed(hashes) == expected

    @pytest.mark.parametrize(
        "url, expected",
        [
            ("git+https://github.com/org/repo", True),
            ("bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject", True),
            ("hg+file://hg.company.com/repo", True),
            ("https://example.com/some.whl", False),
            ("file://home/foo/some.whl", False),
        ],
    )
    def test_is_vcs(self, url: str, expected: bool) -> None:
        link = Link(url)
        assert link.is_vcs is expected
@pytest.mark.parametrize(
"url1, url2",
[
pytest.param(
"https://example.com/foo#egg=foo",
"https://example.com/foo",
id="drop-egg",
),
pytest.param(
"https://example.com/foo#subdirectory=bar&egg=foo",
"https://example.com/foo#subdirectory=bar&egg=bar",
id="drop-egg-only",
),
pytest.param(
"https://example.com/foo#subdirectory=bar&egg=foo",
"https://example.com/foo#egg=foo&subdirectory=bar",
id="fragment-ordering",
),
pytest.param(
"https://example.com/foo?a=1&b=2",
"https://example.com/foo?b=2&a=1",
id="query-opordering",
),
],
)
def test_links_equivalent(url1: str, url2: str) -> None:
assert links_equivalent(Link(url1), Link(url2))
@pytest.mark.parametrize(
"url1, url2",
[
pytest.param(
"https://example.com/foo#sha512=1234567890abcdef",
"https://example.com/foo#sha512=abcdef1234567890",
id="different-keys",
),
pytest.param(
"https://example.com/foo#sha512=1234567890abcdef",
"https://example.com/foo#md5=1234567890abcdef",
id="different-values",
),
pytest.param(
"https://example.com/foo#subdirectory=bar&egg=foo",
"https://example.com/foo#subdirectory=rex",
id="drop-egg-still-different",
),
],
)
def test_links_equivalent_false(url1: str, url2: str) -> None:
assert not links_equivalent(Link(url1), Link(url2))
| TestLink |
python | fsspec__filesystem_spec | fsspec/implementations/sftp.py | {
"start": 239,
"end": 5923
} | class ____(AbstractFileSystem):
"""Files over SFTP/SSH
Peer-to-peer filesystem over SSH using paramiko.
Note: if using this with the ``open`` or ``open_files``, with full URLs,
there is no way to tell if a path is relative, so all paths are assumed
to be absolute.
"""
protocol = "sftp", "ssh"
def __init__(self, host, **ssh_kwargs):
"""
Parameters
----------
host: str
Hostname or IP as a string
temppath: str
Location on the server to put files, when within a transaction
ssh_kwargs: dict
Parameters passed on to connection. See details in
https://docs.paramiko.org/en/3.3/api/client.html#paramiko.client.SSHClient.connect
May include port, username, password...
"""
if self._cached:
return
super().__init__(**ssh_kwargs)
self.temppath = ssh_kwargs.pop("temppath", "/tmp") # remote temp directory
self.host = host
self.ssh_kwargs = ssh_kwargs
self._connect()
def _connect(self):
logger.debug("Connecting to SFTP server %s", self.host)
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(self.host, **self.ssh_kwargs)
self.ftp = self.client.open_sftp()
@classmethod
def _strip_protocol(cls, path):
return infer_storage_options(path)["path"]
@staticmethod
def _get_kwargs_from_urls(urlpath):
out = infer_storage_options(urlpath)
out.pop("path", None)
out.pop("protocol", None)
return out
def mkdir(self, path, create_parents=True, mode=511):
path = self._strip_protocol(path)
logger.debug("Creating folder %s", path)
if self.exists(path):
raise FileExistsError(f"File exists: {path}")
if create_parents:
self.makedirs(path)
else:
self.ftp.mkdir(path, mode)
def makedirs(self, path, exist_ok=False, mode=511):
if self.exists(path) and not exist_ok:
raise FileExistsError(f"File exists: {path}")
parts = path.split("/")
new_path = "/" if path[:1] == "/" else ""
for part in parts:
if part:
new_path = f"{new_path}/{part}" if new_path else part
if not self.exists(new_path):
self.ftp.mkdir(new_path, mode)
def rmdir(self, path):
path = self._strip_protocol(path)
logger.debug("Removing folder %s", path)
self.ftp.rmdir(path)
def info(self, path):
path = self._strip_protocol(path)
stat = self._decode_stat(self.ftp.stat(path))
stat["name"] = path
return stat
@staticmethod
def _decode_stat(stat, parent_path=None):
if S_ISDIR(stat.st_mode):
t = "directory"
elif S_ISLNK(stat.st_mode):
t = "link"
else:
t = "file"
out = {
"name": "",
"size": stat.st_size,
"type": t,
"uid": stat.st_uid,
"gid": stat.st_gid,
"time": datetime.datetime.fromtimestamp(
stat.st_atime, tz=datetime.timezone.utc
),
"mtime": datetime.datetime.fromtimestamp(
stat.st_mtime, tz=datetime.timezone.utc
),
}
if parent_path:
out["name"] = "/".join([parent_path.rstrip("/"), stat.filename])
return out
def ls(self, path, detail=False):
path = self._strip_protocol(path)
logger.debug("Listing folder %s", path)
stats = [self._decode_stat(stat, path) for stat in self.ftp.listdir_iter(path)]
if detail:
return stats
else:
paths = [stat["name"] for stat in stats]
return sorted(paths)
def put(self, lpath, rpath, callback=None, **kwargs):
rpath = self._strip_protocol(rpath)
logger.debug("Put file %s into %s", lpath, rpath)
self.ftp.put(lpath, rpath)
def get_file(self, rpath, lpath, **kwargs):
if self.isdir(rpath):
os.makedirs(lpath, exist_ok=True)
else:
self.ftp.get(self._strip_protocol(rpath), lpath)
def _open(self, path, mode="rb", block_size=None, **kwargs):
"""
block_size: int or None
If 0, no buffering, if 1, line buffering, if >1, buffer that many
bytes, if None use default from paramiko.
"""
logger.debug("Opening file %s", path)
if kwargs.get("autocommit", True) is False:
# writes to temporary file, move on commit
path2 = "/".join([self.temppath, str(uuid.uuid4())])
f = self.ftp.open(path2, mode, bufsize=block_size if block_size else -1)
f.temppath = path2
f.targetpath = path
f.fs = self
f.commit = types.MethodType(commit_a_file, f)
f.discard = types.MethodType(discard_a_file, f)
else:
f = self.ftp.open(path, mode, bufsize=block_size if block_size else -1)
return f
def _rm(self, path):
if self.isdir(path):
self.ftp.rmdir(path)
else:
self.ftp.remove(path)
def mv(self, old, new):
new = self._strip_protocol(new)
old = self._strip_protocol(old)
logger.debug("Renaming %s into %s", old, new)
self.ftp.posix_rename(old, new)
def commit_a_file(self):
self.fs.mv(self.temppath, self.targetpath)
def discard_a_file(self):
self.fs._rm(self.temppath)
| SFTPFileSystem |
python | getsentry__sentry | src/sentry/search/base.py | {
"start": 547,
"end": 1465
} | class ____(Service):
__read_methods__ = ("query",)
__write_methods__ = ()
__all__ = tuple(set(__read_methods__ + __write_methods__))
def __init__(self, **options: Mapping[str, Any] | None):
pass
def query(
self,
projects: Sequence[Project],
environments: Sequence[Environment] | None = None,
sort_by: str = "date",
limit: int = 100,
cursor: Cursor | None = None,
count_hits: bool = False,
paginator_options: Mapping[str, Any] | None = None,
search_filters: Sequence[SearchFilter] | None = None,
date_from: datetime | None = None,
date_to: datetime | None = None,
max_hits: int | None = None,
actor: Any | None = None,
aggregate_kwargs: TrendsSortWeights | None = None,
*,
referrer: str,
) -> CursorResult[Group]:
raise NotImplementedError
| SearchBackend |
python | django__django | tests/check_framework/tests.py | {
"start": 3376,
"end": 6013
} | class ____(SimpleTestCase):
def test_printing(self):
e = Error("Message", hint="Hint", obj=DummyObj())
expected = "obj: Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_no_hint(self):
e = Error("Message", obj=DummyObj())
expected = "obj: Message"
self.assertEqual(str(e), expected)
def test_printing_no_object(self):
e = Error("Message", hint="Hint")
expected = "?: Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_with_given_id(self):
e = Error("Message", hint="Hint", obj=DummyObj(), id="ID")
expected = "obj: (ID) Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_field_error(self):
field = SimpleModel._meta.get_field("field")
e = Error("Error", obj=field)
expected = "check_framework.SimpleModel.field: Error"
self.assertEqual(str(e), expected)
def test_printing_model_error(self):
e = Error("Error", obj=SimpleModel)
expected = "check_framework.SimpleModel: Error"
self.assertEqual(str(e), expected)
def test_printing_manager_error(self):
manager = SimpleModel.manager
e = Error("Error", obj=manager)
expected = "check_framework.SimpleModel.manager: Error"
self.assertEqual(str(e), expected)
def test_equal_to_self(self):
e = Error("Error", obj=SimpleModel)
self.assertEqual(e, e)
def test_equal_to_same_constructed_check(self):
e1 = Error("Error", obj=SimpleModel)
e2 = Error("Error", obj=SimpleModel)
self.assertEqual(e1, e2)
def test_not_equal_to_different_constructed_check(self):
e1 = Error("Error", obj=SimpleModel)
e2 = Error("Error2", obj=SimpleModel)
self.assertNotEqual(e1, e2)
def test_not_equal_to_non_check(self):
e = Error("Error", obj=DummyObj())
self.assertNotEqual(e, "a string")
def test_invalid_level(self):
msg = "The first argument should be level."
with self.assertRaisesMessage(TypeError, msg):
CheckMessage("ERROR", "Message")
def simple_system_check(**kwargs):
simple_system_check.kwargs = kwargs
return []
def tagged_system_check(**kwargs):
tagged_system_check.kwargs = kwargs
return [checks.Warning("System Check")]
tagged_system_check.tags = ["simpletag"]
def deployment_system_check(**kwargs):
deployment_system_check.kwargs = kwargs
return [checks.Warning("Deployment Check")]
deployment_system_check.tags = ["deploymenttag"]
| MessageTests |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 10449,
"end": 11191
} | class ____(nn.Module):
def __init__(self, config: HunYuanMoEV1Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
num_experts = config.num_experts if isinstance(config.num_experts, int) else config.num_experts[layer_idx]
self.wg = nn.Linear(config.hidden_size, num_experts, bias=False, dtype=torch.float32)
def forward(self, hidden_states):
bsz, seq_len, hidden_size = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_size)
if self.wg.weight.dtype == torch.float32:
hidden_states = hidden_states.float()
logits = self.wg(hidden_states)
return logits
| HunYuanMoEV1Gate |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 67833,
"end": 72043
} | class ____(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testMultiDimensionalLSTMAllRNNContainers(self):
feature_dims = (3, 4, 5)
input_size = feature_dims
batch_size = 2
max_length = 8
sequence_length = [4, 6]
with self.session(graph=ops.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
]
inputs_using_dim = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size,) + input_size)
]
inputs_c = array_ops_stack.stack(inputs)
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = DummyMultiDimensionalLSTM(feature_dims)
state_saver = TestStateSaver(batch_size, input_size)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
self.assertEqual(outputs_dynamic.get_shape().as_list(),
inputs_c.get_shape().as_list())
for out, inp in zip(outputs_static, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
for out, inp in zip(outputs_bid, inputs_using_dim):
input_shape_list = inp.get_shape().as_list()
# fwd and bwd activations are concatenated along the second dim.
input_shape_list[1] *= 2
self.assertEqual(out.get_shape().as_list(), input_shape_list)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
inputs_using_dim[0]: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=2)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_static_v = sess.run(
state_static, feed_dict={
inputs[0]: input_value
})
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
inputs[0]: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
inputs_using_dim[0]: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
inputs_using_dim[0]: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
| MultiDimensionalLSTMTest |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py | {
"start": 7936,
"end": 8610
} | class ____(metaclass=abc.ABCMeta):
"""Validator instances are intended to be run on a single object. if you
are scanning multiple objects for problems, you'll want to have a separate
Validator for each one."""
def __init__(self, reporter=None):
self.reporter = reporter
@property
@abc.abstractmethod
def object_name(self):
"""Name of the object we validated"""
pass
@property
@abc.abstractmethod
def object_path(self):
"""Path of the object we validated"""
pass
@abc.abstractmethod
def validate(self):
"""Run this method to generate the test results"""
pass
| Validator |
python | vyperlang__vyper | vyper/compiler/output_bundle.py | {
"start": 4812,
"end": 6787
} | class ____:
def __init__(self, compiler_data: CompilerData):
self.compiler_data = compiler_data
@cached_property
def bundle(self):
return OutputBundle(self.compiler_data)
def write_sources(self, sources: dict[str, CompilerInput]):
raise NotImplementedError(f"write_sources: {self.__class__}")
def write_storage_layout_overrides(
self, compilation_target_path: str, storage_layout_override: JSONInput
):
raise NotImplementedError(f"write_storage_layout_overrides: {self.__class__}")
def write_search_paths(self, search_paths: list[str]):
raise NotImplementedError(f"write_search_paths: {self.__class__}")
def write_settings(self, settings: Optional[Settings]):
raise NotImplementedError(f"write_settings: {self.__class__}")
def write_integrity(self, integrity_sum: str):
raise NotImplementedError(f"write_integrity: {self.__class__}")
def write_compilation_target(self, targets: list[str]):
raise NotImplementedError(f"write_compilation_target: {self.__class__}")
def write_compiler_version(self, version: str):
raise NotImplementedError(f"write_compiler_version: {self.__class__}")
def output(self):
raise NotImplementedError(f"output: {self.__class__}")
def write(self):
long_version = get_long_version()
self.write_version(f"v{long_version}")
self.write_compilation_target([self.bundle.compilation_target_path])
self.write_search_paths(self.bundle.used_search_paths)
self.write_settings(self.compiler_data.original_settings)
self.write_integrity(self.compiler_data.integrity_sum)
self.write_sources(self.bundle.source_codes)
if self.compiler_data.storage_layout_override is not None:
self.write_storage_layout_overrides(
self.bundle.compilation_target_path, self.compiler_data.storage_layout_override
)
| OutputBundleWriter |
python | astropy__astropy | astropy/units/core.py | {
"start": 42617,
"end": 50386
} | class ____:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {
k: v.copy() for k, v in init._by_physical_type.items()
}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self) -> None:
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self) -> None:
self._equivalencies = set()
def _reset_aliases(self) -> None:
self._aliases = {}
@property
def registry(self) -> dict[str, UnitBase]:
return self._registry
@property
def all_units(self) -> set[UnitBase]:
return self._all_units
@property
def non_prefix_units(self) -> set[UnitBase]:
return self._non_prefix_units
def set_enabled_units(self, units: object) -> None:
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units: object) -> None:
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if st in self._registry and unit != self._registry[st]:
raise ValueError(
f"Object with name {st!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them."
)
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
self._by_physical_type.setdefault(unit._physical_type_id, set()).add(unit)
def get_units_with_physical_type(self, unit: UnitBase) -> set[UnitBase]:
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._physical_type_id, set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self) -> dict[str, UnitBase]:
return self._aliases
def set_enabled_aliases(self, aliases: dict[str, UnitBase]) -> None:
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases: dict[str, UnitBase]) -> None:
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}."
)
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}."
)
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
| _UnitRegistry |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 312,
"end": 492
} | class ____:
def m1(self, x):
self.m2(x)
def m2(self, x):
# TODO(T114456058): Unexpected position -1 in the sinks of
# override models
pass
| A0 |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/cfg.py | {
"start": 3420,
"end": 5315
} | class ____(
collections.namedtuple(
'Graph',
['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
"""A Control Flow Graph.
The CFG maintains an index to allow looking up a CFG node by the AST node to
which it is associated. The index can also be enumerated in top-down, depth
first order.
Walking the graph in forward or reverse order is supported by double
parent-child links.
Note: the error nodes are not wired to their corresponding finally guards,
because these are shared, and wiring them would create a reverse path from
normal control flow into the error nodes, which we want to avoid.
The graph also maintains edges corresponding to higher level statements
like for-else loops. A node is considered successor of a statement if there
is an edge from a node that is lexically a child of that statement to a node
that is not. Statement predecessors are analogously defined.
Attributes:
entry: Node, the entry node
exit: FrozenSet[Node, ...], the exit nodes
error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
error (errors propagated from function calls are not accounted)
index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node
stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes
to their predecessor CFG nodes
stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes
to their successor CFG nodes
"""
def __repr__(self):
return self.as_dot()
def as_dot(self):
"""Print CFG in DOT format."""
result = 'digraph CFG {\n'
for node in self.index.values():
result += ' %s [label="%s"];\n' % (id(node), node)
for node in self.index.values():
for next_ in node.next:
result += ' %s -> %s;\n' % (id(node), id(next_))
result += '}'
return result
| Graph |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 64830,
"end": 66840
} | class ____:
def __init__(self, name: str) -> None:
self.name = name
@override
def __str__(self) -> str:
return f"Other:{self.name}"
Platform = Union[
OtherPlatform,
Literal[
"MacOS",
"Linux",
"Windows",
"FreeBSD",
"OpenBSD",
"iOS",
"Android",
"Unknown",
],
]
def get_platform() -> Platform:
try:
system = platform.system().lower()
platform_name = platform.platform().lower()
except Exception:
return "Unknown"
if "iphone" in platform_name or "ipad" in platform_name:
# Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7
# system is Darwin and platform_name is a string like:
# - Darwin-21.6.0-iPhone12,1-64bit
# - Darwin-21.6.0-iPad7,11-64bit
return "iOS"
if system == "darwin":
return "MacOS"
if system == "windows":
return "Windows"
if "android" in platform_name:
# Tested using Pydroid 3
# system is Linux and platform_name is a string like 'Linux-5.10.81-android12-9-00001-geba40aecb3b7-ab8534902-aarch64-with-libc'
return "Android"
if system == "linux":
# https://distro.readthedocs.io/en/latest/#distro.id
distro_id = distro.id()
if distro_id == "freebsd":
return "FreeBSD"
if distro_id == "openbsd":
return "OpenBSD"
return "Linux"
if platform_name:
return OtherPlatform(platform_name)
return "Unknown"
@lru_cache(maxsize=None)
def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]:
return {
"X-Stainless-Lang": "python",
"X-Stainless-Package-Version": version,
"X-Stainless-OS": str(platform or get_platform()),
"X-Stainless-Arch": str(get_architecture()),
"X-Stainless-Runtime": get_python_runtime(),
"X-Stainless-Runtime-Version": get_python_version(),
}
| OtherPlatform |
python | numba__numba | numba/tests/test_dictimpl.py | {
"start": 5639,
"end": 6042
} | class ____(types.Type):
"""this is essentially UniTuple(unicode_type, n)
BUT type name is the same for all n"""
def __init__(self, value):
super(ParametrizedType, self).__init__('ParametrizedType')
self.dtype = types.unicode_type
self.n = len(value)
@property
def key(self):
return self.n
def __len__(self):
return self.n
| ParametrizedType |
python | ZoranPandovski__al-go-rithms | dp/Shortest common Supersequence/shortest_common_supersequence.py | {
"start": 32,
"end": 1384
} | class ____:
def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
m = len(str1)
n = len(str2)
t = [[-1]*(n+1) for i in range(m+1)]
for i in range(m+1):
for j in range(n+1):
if(i==0 or j==0):
t[i][j] = 0
for i in range(1, m+1):
for j in range(1, n+1):
if str1[i-1] == str2[j-1]:
t[i][j] = 1 + t[i-1][j-1]
else:
t[i][j] = max(t[i-1][j], t[i][j-1])
o = m
p = n
s = ''
while o > 0 and p > 0:
if str1[o-1] == str2[p-1]:
s = s + str1[o-1]
o -= 1
p -= 1
else:
if t[o-1][p] > t[o][p-1]:
o -= 1
else:
p -= 1
s = s[::-1]
res, i, j = "", 0, 0
for a in s:
while str1[i] != a:
res += str1[i]
i+=1
while str2[j] != a:
res += str2[j]
j +=1
res += a
i+=1
j+=1
return res + str1[i:] + str2[j:] | Solution |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_units.py | {
"start": 10483,
"end": 11657
} | class ____:
def __init__(self, array):
self._array = np.asanyarray(array)
def __array__(self, dtype=None, copy=None):
if dtype is not None and dtype != self._array.dtype:
if copy is not None and not copy:
raise ValueError(
f"Converting array from {self._array.dtype} to "
f"{dtype} requires a copy"
)
arr = np.asarray(self._array, dtype=dtype)
return (arr if not copy else np.copy(arr))
@property
def shape(self):
return self._array.shape
def test_plot_kernel():
# just a smoketest that fail
kernel = Kernel([1, 2, 3, 4, 5])
plt.plot(kernel)
def test_connection_patch_units(pd):
# tests that this doesn't raise an error
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(10, 5))
x = pd.Timestamp('2017-01-01T12')
ax1.axvline(x)
y = "test test"
ax2.axhline(y)
arr = mpatches.ConnectionPatch((x, 0), (0, y),
coordsA='data', coordsB='data',
axesA=ax1, axesB=ax2)
fig.add_artist(arr)
fig.draw_without_rendering()
| Kernel |
python | ipython__ipython | tests/test_zzz_autoreload.py | {
"start": 25862,
"end": 39729
} | class ____: # old-style class
def foo(self):
return 2
""",
)
def check_module_contents():
self.assertEqual(mod.x, 10)
self.assertFalse(hasattr(mod, "z"))
self.assertEqual(old_foo(0), 4) # superreload magic!
self.assertEqual(mod.foo(0), 4)
obj = mod.Baz(9)
self.assertEqual(old_obj.bar(1), 11) # superreload magic!
self.assertEqual(obj.bar(1), 11)
self.assertEqual(old_obj.quux, 43)
self.assertEqual(obj.quux, 43)
self.assertFalse(hasattr(old_obj, "zzz"))
self.assertFalse(hasattr(obj, "zzz"))
obj2 = mod.Bar()
self.assertEqual(old_obj2.foo(), 2)
self.assertEqual(obj2.foo(), 2)
self.shell.run_code("pass") # trigger reload
check_module_contents()
#
# Another failure case: deleted file (shouldn't reload)
#
os.unlink(mod_fn)
self.shell.run_code("pass") # trigger reload
check_module_contents()
#
# Disable autoreload and rewrite module: no reload should occur
#
if use_aimport:
self.shell.magic_aimport("-" + mod_name)
stream = StringIO()
self.shell.magic_aimport("", stream=stream)
self.assertTrue(("Modules to skip:\n%s" % mod_name) in stream.getvalue())
# This should succeed, although no such module exists
self.shell.magic_aimport("-tmpmod_as318989e89ds")
else:
self.shell.magic_autoreload("0")
self.write_file(
mod_fn,
"""
x = -99
""",
)
self.shell.run_code("pass") # trigger reload
self.shell.run_code("pass")
check_module_contents()
#
# Re-enable autoreload: reload should now occur
#
if use_aimport:
self.shell.magic_aimport(mod_name)
else:
self.shell.magic_autoreload("")
self.shell.run_code("pass") # trigger reload
self.assertEqual(mod.x, -99)
def test_smoketest_aimport(self):
self._check_smoketest(use_aimport=True)
def test_smoketest_autoreload(self):
self._check_smoketest(use_aimport=False)
def test_autoreload_with_user_defined_In_variable(self):
"""
Check that autoreload works when the user has defined an In variable.
"""
mod_name, mod_fn = self.new_module(
textwrap.dedent(
"""
def hello():
return "Hello"
"""
)
)
self.shell.magic_autoreload("2")
self.shell.run_code(f"import {mod_name}")
self.shell.run_code(f"res = {mod_name}.hello()")
assert self.shell.user_ns["res"] == "Hello"
self.shell.user_ns["In"] = "some_value"
self.write_file(
mod_fn,
textwrap.dedent(
"""
def hello():
return "Changed"
"""
),
)
self.shell.run_code(f"res = {mod_name}.hello()")
assert self.shell.user_ns["res"] == "Changed"
def test_import_from_tracker_conflict_resolution(self):
    """Test that ImportFromTracker properly handles import conflicts"""
    from unittest.mock import Mock

    from IPython.extensions.autoreload import ImportFromTracker

    # A test module exposing both 'foo' and 'bar'.
    mod_name, mod_fn = self.new_module(
        textwrap.dedent(
            """
            foo = "original_foo"
            bar = "original_bar"
            """
        )
    )
    # Register a stand-in module rather than importing the real file.
    stub = Mock()
    stub.foo = "original_foo"
    stub.bar = "original_bar"
    sys.modules[mod_name] = stub
    try:
        tracker = ImportFromTracker({}, {})

        # Case: "from x import foo as bar" followed by "from x import bar".
        tracker.add_import(mod_name, "foo", "bar")
        assert mod_name in tracker.imports_froms
        assert "foo" in tracker.imports_froms[mod_name]
        assert tracker.symbol_map[mod_name]["foo"] == ["bar"]

        # The plain "bar" import collides with the earlier alias; the more
        # recent (and valid) import must win and evict the old mapping.
        tracker.add_import(mod_name, "bar", "bar")
        assert "bar" in tracker.imports_froms[mod_name]
        assert "foo" not in tracker.imports_froms[mod_name]
        assert tracker.symbol_map[mod_name]["bar"] == ["bar"]
        assert "foo" not in tracker.symbol_map[mod_name]
    finally:
        # Always drop the stand-in so other tests see a clean sys.modules.
        sys.modules.pop(mod_name, None)
def test_import_from_tracker_reverse_conflict(self):
    """Test the reverse case: 'from x import z' then 'from x import y as z'"""
    from unittest.mock import Mock

    from IPython.extensions.autoreload import ImportFromTracker

    # A test module exposing both 'foo' and 'bar'.
    mod_name, mod_fn = self.new_module(
        textwrap.dedent(
            """
            foo = "original_foo"
            bar = "original_bar"
            """
        )
    )
    # Register a stand-in module rather than importing the real file.
    stub = Mock()
    stub.foo = "original_foo"
    stub.bar = "original_bar"
    sys.modules[mod_name] = stub
    try:
        tracker = ImportFromTracker({}, {})

        # Plain import first: from mod_name import bar
        tracker.add_import(mod_name, "bar", "bar")
        assert "bar" in tracker.imports_froms[mod_name]
        assert tracker.symbol_map[mod_name]["bar"] == ["bar"]

        # Aliased import next: "from mod_name import foo as bar". It clashes
        # with the plain "bar" binding and, being newer, must replace it.
        tracker.add_import(mod_name, "foo", "bar")
        assert "foo" in tracker.imports_froms[mod_name]
        assert "bar" not in tracker.imports_froms[mod_name]
        assert tracker.symbol_map[mod_name]["foo"] == ["bar"]
        assert "bar" not in tracker.symbol_map[mod_name]
    finally:
        # Always drop the stand-in so other tests see a clean sys.modules.
        sys.modules.pop(mod_name, None)
def test_import_from_tracker_invalid_import(self):
    """Test that ImportFromTracker works correctly with the post-execution approach"""
    from IPython.extensions.autoreload import ImportFromTracker

    # A module exposing only 'foo'; the file itself is never imported here.
    mod_name, mod_fn = self.new_module(
        textwrap.dedent(
            """
            foo = "original_foo"
            """
        )
    )
    tracker = ImportFromTracker({}, {})

    # Simulate a successful "from mod_name import foo as bar" (the tracker
    # is only invoked after an import has actually executed).
    tracker.add_import(mod_name, "foo", "bar")
    assert "foo" in tracker.imports_froms[mod_name]
    assert tracker.symbol_map[mod_name]["foo"] == ["bar"]

    # A later "from mod_name import foo2 as bar" — also assumed to have
    # executed successfully, i.e. the module gained 'foo2' — must supersede
    # the earlier mapping for the same resolved name.
    tracker.add_import(mod_name, "foo2", "bar")
    assert "foo2" in tracker.imports_froms[mod_name]
    assert "foo" not in tracker.imports_froms[mod_name]
    assert tracker.symbol_map[mod_name]["foo2"] == ["bar"]
    assert "foo" not in tracker.symbol_map[mod_name]
def test_import_from_tracker_integration(self):
    """End-to-end check of ImportFromTracker inside autoreload mode 3.

    A name bound via ``from x import foo as bar`` and later rebound via
    ``from x import bar`` must track the *latest* origin symbol, so that a
    subsequent reload refreshes ``bar`` from the module's real ``bar``.
    """
    # Create a test module
    mod_name, mod_fn = self.new_module(
        textwrap.dedent(
            """
            foo = "original_foo"
            bar = "original_bar"
            """
        )
    )
    # Enable autoreload mode 3 (complete)
    self.shell.magic_autoreload("3")
    # First import: from mod_name import foo as bar
    # This will naturally load the module into sys.modules
    self.shell.run_code(f"from {mod_name} import foo as bar")
    assert self.shell.user_ns["bar"] == "original_foo"
    # Second import: from mod_name import bar (should override the alias)
    # The module is already in sys.modules, so this should work with our validation
    self.shell.run_code(f"from {mod_name} import bar")
    assert self.shell.user_ns["bar"] == "original_bar"  # Should now be the real bar
    # Modify the module on disk so the next reload sees new values.
    self.write_file(
        mod_fn,
        textwrap.dedent(
            """
            foo = "modified_foo"
            bar = "modified_bar"
            """
        ),
    )
    # Trigger autoreload by running any code
    self.shell.run_code("x = 1")
    # The 'bar' variable should now contain the modified 'bar', not 'foo':
    # the tracker remembered that the latest binding came from 'bar'.
    assert self.shell.user_ns["bar"] == "modified_bar"
def test_autoreload3_double_import(self):
    """Check that two bindings of the same origin symbol both get refreshed.

    Under autoreload mode 3, importing ``foo`` twice — once aliased
    (``foo as bar``) and once plainly — must leave both user-namespace
    names pointing at the updated value after the module is rewritten.
    """
    # Create a test module
    mod_name, mod_fn = self.new_module(
        textwrap.dedent(
            """
            foo = "original_foo"
            bar = "original_bar"
            """
        )
    )
    # Enable autoreload mode 3 (complete)
    self.shell.magic_autoreload("3")
    # First import: from mod_name import foo as bar
    # This will naturally load the module into sys.modules
    self.shell.run_code(f"from {mod_name} import foo as bar")
    self.shell.run_code(f"from {mod_name} import foo")
    assert self.shell.user_ns["bar"] == "original_foo"
    assert self.shell.user_ns["foo"] == "original_foo"
    # Modify the module on disk so the next reload sees new values.
    self.write_file(
        mod_fn,
        textwrap.dedent(
            """
            foo = "modified_foo"
            bar = "modified_bar"
            """
        ),
    )
    # Any code execution triggers the reload; both names must update and
    # both must track 'foo' (not the module's 'bar').
    self.shell.run_code("pass")
    assert self.shell.user_ns["bar"] == "modified_foo"
    assert self.shell.user_ns["foo"] == "modified_foo"
def test_import_from_tracker_unloaded_module(self):
    """Test that ImportFromTracker works with the post-execution approach"""
    from IPython.extensions.autoreload import ImportFromTracker

    # Imports are recorded only after they execute successfully, so even a
    # key that never appears in sys.modules is tracked once add_import()
    # has been called for it.
    missing_mod = "test_module_12345"
    tracker = ImportFromTracker({}, {})

    # Simulate an import that executed successfully.
    tracker.add_import(missing_mod, "some_attr", "some_name")
    assert missing_mod in tracker.imports_froms
    assert missing_mod in tracker.symbol_map
    assert "some_attr" in tracker.imports_froms[missing_mod]
    assert tracker.symbol_map[missing_mod]["some_attr"] == ["some_name"]

    # A newer successful import under the same resolved name replaces the
    # earlier mapping.
    tracker.add_import(missing_mod, "another_attr", "some_name")
    assert missing_mod in tracker.imports_froms
    assert missing_mod in tracker.symbol_map
    assert "another_attr" in tracker.imports_froms[missing_mod]
    assert "some_attr" not in tracker.imports_froms[missing_mod]
    assert tracker.symbol_map[missing_mod]["another_attr"] == ["some_name"]
    assert "some_attr" not in tracker.symbol_map[missing_mod]
def test_import_from_tracker_multiple_resolved_names(self):
    """Test that the same original name can map to multiple resolved names"""
    from IPython.extensions.autoreload import ImportFromTracker

    tracker = ImportFromTracker({}, {})
    module_key = "test_module_abc"

    # "from test_module_abc import foo as bar"
    tracker.add_import(module_key, "foo", "bar")
    assert "foo" in tracker.imports_froms[module_key]
    assert tracker.symbol_map[module_key]["foo"] == ["bar"]

    # "from test_module_abc import foo" — same origin symbol, different
    # resolved name. Both resolved names must remain associated with 'foo'.
    tracker.add_import(module_key, "foo", "foo")
    assert "foo" in tracker.imports_froms[module_key]
    assert set(tracker.symbol_map[module_key]["foo"]) == {"bar", "foo"}
| Bar |
python | joke2k__faker | faker/providers/lorem/hy_AM/__init__.py | {
"start": 68,
"end": 3810
} | class ____(LoremProvider):
"""Implement lorem provider for ``hy_AM`` locale.
Sources:
- https://www.101languages.net/armenian/armenian-word-list
"""
word_list = (
"ես",
"դու",
"նա",
"մենք",
"դուք",
"նրանք",
"այս",
"այն",
"այստեղ",
"այնտեղ",
"ով",
"ինչ",
"որտեղ",
"ուր",
"երբ",
"ինչպես",
"ոչ",
"բոլոր",
"շատ",
"որոշ",
"քիչ",
"այլ",
"ուրիշ",
"մեկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"մեծ",
"երկար",
"լայն",
"հաստ",
"ծանր",
"փոքր",
"կարճ",
"նեղ",
"բարակ",
"կին",
"տղամարդ",
"մարդ",
"երեխա",
"կին",
"ամուսին",
"մայր",
"հայր",
"կենդանի",
"ձուկ",
"թռչուն",
"շուն",
"ոջիլ",
"օձ",
"ճիճու",
"ծառ",
"անտառ",
"փայտ",
"պտուղ",
"սերմ",
"տերև",
"արմատ",
"կեղև",
"ծաղիկ",
"խոտ",
"պարան",
"մաշկ",
"կաշի",
"միս",
"արյուն",
"ոսկոր",
"ճարպ",
"ձու",
"եղջյուր",
"պոզ",
"պոչ",
"փետուր",
"մազ",
"գլուխ",
"ականջ",
"աչք",
"քիթ",
"բերան",
"ատամ",
"լեզու",
"եղունգ",
"ոտք",
"ծունկ",
"ձեռք",
"թև",
"փոր",
"փորոտիք",
"աղիք",
"վիզ",
"մեջք",
"կուրծք",
"սիրտ",
"լյարդ",
"խմել",
"ուտել",
"կծել",
"ծծել",
"թքել",
"ործկալ",
"փչել",
"շնչել",
"ծիծաղել",
"տեսնել",
"լսել",
"իմանալ",
"գիտենալ",
"մտածել",
"զգալ",
"վախենալ",
"քնել",
"ապրել",
"մեռնել",
"սպանել",
"կռվել",
"որսալ",
"խփել",
"հարվածել",
"կտրել",
"բաժանել",
"խոցել",
"քերծել",
"քորել",
"փորել",
"լողալ",
"թռչել",
"քայլել",
"գալ",
"պառկել",
"նստել",
"կանգնել",
"շրջվել",
"ընկնել",
"տալ",
"պահել",
"բռնել",
"սեղմել",
"շփել",
"լվալ",
"սրբել",
"ձգել",
"քաշել",
"հրել",
"նետել",
"կապել",
"կարել",
"հաշվել",
"ասել",
"երգել",
"խաղալ",
"լողալ",
"հոսել",
"սառչել",
"ուռել",
"արև",
"լուսին",
"աստղ",
"ջուր",
"անձրև",
"գետ",
"լիճ",
"ծով",
"աղ",
"քար",
"ավազ",
"փոշի",
"հող",
"ամպ",
"մառախուղ",
"մշուշ",
"երկինք",
"քամի",
"ձյուն",
"սառույց",
"ծուխ",
"հուր",
"կրակ",
"մոխիր",
"վառվել",
"այրվել",
"ճամփա",
"ճանապարհ",
"լեռ",
"սար",
"կարմիր",
"կանաչ",
"դեղին",
"սպիտակ",
"սև",
"գիշեր",
"օր",
"տարի",
"տաք",
"ցուրտ",
"լիքը",
"նոր",
"հին",
"լավ",
"վատ",
"փտած",
"կեղտոտ",
"ուղիղ",
"կլոր",
"սուր",
"բութ",
"հարթ",
"թաց",
"չոր",
"ճիշտ",
"մոտ",
"հեռու",
"աջ",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | pypa__warehouse | tests/unit/test_views.py | {
"start": 28411,
"end": 28713
} | class ____:
def test_valid(self):
with pytest.raises(HTTPBadRequest):
force_status(pretend.stub(matchdict={"status": "400"}))
def test_invalid(self):
with pytest.raises(HTTPNotFound):
force_status(pretend.stub(matchdict={"status": "599"}))
| TestForceStatus |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_format25.py | {
"start": 315,
"end": 978
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("format25.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with automatic color."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format(
{
"border_color": "#FF9966",
"border": 1,
}
)
worksheet.write(2, 2, "", format1)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/bulk_create/models.py | {
"start": 1727,
"end": 1819
} | class ____(models.Model):
id = models.SmallAutoField(primary_key=True)
| SmallAutoFieldModel |
python | kamyu104__LeetCode-Solutions | Python/check-if-n-and-its-double-exist.py | {
"start": 29,
"end": 360
} | class ____(object):
def checkIfExist(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
lookup = set()
for x in arr:
if 2*x in lookup or \
(x%2 == 0 and x//2 in lookup):
return True
lookup.add(x)
return False
| Solution |
python | eventlet__eventlet | tests/dagpool_test.py | {
"start": 4028,
"end": 19429
} | class ____:
"""
This class is intended to capture a sequence (of string messages) to
verify that all expected events occurred, and in the expected order. The
tricky part is that certain subsequences can occur in arbitrary order and
still be correct.
Specifically, when posting a particular value to a DAGPool instance
unblocks several waiting greenthreads, it is indeterminate which
greenthread will first receive the new value.
Similarly, when several values for which a particular greenthread is
waiting become available at (effectively) the same time, it is
indeterminate in which order they will be delivered.
This is addressed by building a list of sets. Each set contains messages
that can occur in indeterminate order, therefore comparing that set to any
other ordering of the same messages should succeed. However, it's
important that each set of messages that occur 'at the same time' should
itself be properly sequenced with respect to all other such sets.
"""
def __init__(self):
self.sequence = [set()]
def add(self, message):
self.sequence[-1].add(message)
def step(self):
self.sequence.append(set())
def validate(self, sequence):
# Let caller pass any sequence of grouped items. For comparison
# purposes, turn them into the specific form we store: a list of sets.
setlist = []
for subseq in sequence:
if isinstance(subseq, str):
# If this item is a plain string (which Python regards as an
# iterable of characters) rather than a list or tuple or set
# of strings, treat it as atomic. Make a set containing only
# that string.
setlist.append({subseq})
else:
try:
iter(subseq)
except TypeError:
# subseq is a scalar of some other kind. Make a set
# containing only that item.
setlist.append({subseq})
else:
# subseq is, as we expect, an iterable -- possibly already
# a set. Make a set containing its elements.
setlist.append(set(subseq))
# Now that we've massaged 'sequence' into 'setlist', compare.
assert_equal(self.sequence, setlist)
# ****************************************************************************
# Canonical DAGPool greenthread function
# ****************************************************************************
def observe(key, results, capture, event):
for k, v in results:
capture.add("{} got {}".format(key, k))
result = event.wait()
capture.add("{} returning {}".format(key, result))
return result
# ****************************************************************************
# DAGPool test functions
# ****************************************************************************
def test_init():
with suspend_checker():
# no preload data, just so we know it doesn't blow up
pool = DAGPool()
# preload dict
pool = DAGPool(dict(a=1, b=2, c=3))
# this must not hang
with check_no_suspend():
results = pool.waitall()
# with no spawn() or post(), waitall() returns preload data
assert_equal(results, dict(a=1, b=2, c=3))
# preload sequence of pairs
pool = DAGPool([("d", 4), ("e", 5), ("f", 6)])
# this must not hang
with check_no_suspend():
results = pool.waitall()
assert_equal(results, dict(d=4, e=5, f=6))
def test_wait_each_empty():
pool = DAGPool()
with suspend_checker():
with check_no_suspend():
for k, v in pool.wait_each(()):
# shouldn't yield anything
raise AssertionError("empty wait_each() returned ({}, {})".format(k, v))
def test_wait_each_preload():
pool = DAGPool(dict(a=1, b=2, c=3))
with suspend_checker():
with check_no_suspend():
# wait_each() may deliver in arbitrary order; collect into a dict
# for comparison
assert_equal(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3))
# while we're at it, test wait() for preloaded keys
assert_equal(pool.wait("bc"), dict(b=2, c=3))
def post_each(pool, capture):
# distinguish the results wait_each() can retrieve immediately from those
# it must wait for us to post()
eventlet.sleep(0)
capture.step()
pool.post('g', 'gval')
pool.post('f', 'fval')
eventlet.sleep(0)
capture.step()
pool.post('e', 'eval')
pool.post('d', 'dval')
def test_wait_each_posted():
capture = Capture()
pool = DAGPool(dict(a=1, b=2, c=3))
eventlet.spawn(post_each, pool, capture)
# use a string as a convenient iterable of single-letter keys
for k, v in pool.wait_each("bcdefg"):
capture.add("got ({}, {})".format(k, v))
capture.validate([
["got (b, 2)", "got (c, 3)"],
["got (f, fval)", "got (g, gval)"],
["got (d, dval)", "got (e, eval)"],
])
def test_wait_posted():
# same as test_wait_each_posted(), but calling wait()
capture = Capture()
pool = DAGPool(dict(a=1, b=2, c=3))
eventlet.spawn(post_each, pool, capture)
gotten = pool.wait("bcdefg")
capture.add("got all")
assert_equal(gotten,
dict(b=2, c=3,
d="dval", e="eval",
f="fval", g="gval"))
capture.validate([
[],
[],
["got all"],
])
def test_spawn_collision_preload():
pool = DAGPool([("a", 1)])
with assert_raises(Collision):
pool.spawn("a", (), lambda key, results: None)
def test_spawn_collision_post():
pool = DAGPool()
pool.post("a", "aval")
with assert_raises(Collision):
pool.spawn("a", (), lambda key, results: None)
def test_spawn_collision_spawn():
pool = DAGPool()
pool.spawn("a", (), lambda key, results: "aval")
# hasn't yet even started
assert_equal(pool.get("a"), None)
with assert_raises(Collision):
# Attempting to spawn again with same key should collide even if the
# first spawned greenthread hasn't yet had a chance to run.
pool.spawn("a", (), lambda key, results: "bad")
# now let the spawned eventlet run
eventlet.sleep(0)
# should have finished
assert_equal(pool.get("a"), "aval")
with assert_raises(Collision):
# Attempting to spawn with same key collides even when the greenthread
# has completed.
pool.spawn("a", (), lambda key, results: "badagain")
def spin():
# Let all pending greenthreads run until they're blocked
for x in range(10):
eventlet.sleep(0)
def test_spawn_multiple():
capture = Capture()
pool = DAGPool(dict(a=1, b=2, c=3))
events = {}
for k in "defg":
events[k] = eventlet.event.Event()
pool.spawn(k, (), observe, capture, events[k])
# Now for a greenthread that depends on ALL the above.
events["h"] = eventlet.event.Event()
# trigger the last event right away: we only care about dependencies
events["h"].send("hval")
pool.spawn("h", "bcdefg", observe, capture, events["h"])
# let all the spawned greenthreads get as far as they can
spin()
capture.step()
# but none of them has yet produced a result
for k in "defgh":
assert_equal(pool.get(k), None)
assert_equal(set(pool.keys()), set("abc"))
assert_equal(dict(pool.items()), dict(a=1, b=2, c=3))
assert_equal(pool.running(), 5)
assert_equal(set(pool.running_keys()), set("defgh"))
assert_equal(pool.waiting(), 1)
assert_equal(pool.waiting_for(), dict(h=set("defg")))
assert_equal(pool.waiting_for("d"), set())
assert_equal(pool.waiting_for("c"), set())
with assert_raises(KeyError):
pool.waiting_for("j")
assert_equal(pool.waiting_for("h"), set("defg"))
# let one of the upstream greenthreads complete
events["f"].send("fval")
spin()
capture.step()
assert_equal(pool.get("f"), "fval")
assert_equal(set(pool.keys()), set("abcf"))
assert_equal(dict(pool.items()), dict(a=1, b=2, c=3, f="fval"))
assert_equal(pool.running(), 4)
assert_equal(set(pool.running_keys()), set("degh"))
assert_equal(pool.waiting(), 1)
assert_equal(pool.waiting_for("h"), set("deg"))
# now two others
events["e"].send("eval")
events["g"].send("gval")
spin()
capture.step()
assert_equal(pool.get("e"), "eval")
assert_equal(pool.get("g"), "gval")
assert_equal(set(pool.keys()), set("abcefg"))
assert_equal(dict(pool.items()),
dict(a=1, b=2, c=3, e="eval", f="fval", g="gval"))
assert_equal(pool.running(), 2)
assert_equal(set(pool.running_keys()), set("dh"))
assert_equal(pool.waiting(), 1)
assert_equal(pool.waiting_for("h"), set("d"))
# last one
events["d"].send("dval")
# make sure both pool greenthreads get a chance to run
spin()
capture.step()
assert_equal(pool.get("d"), "dval")
assert_equal(set(pool.keys()), set("abcdefgh"))
assert_equal(dict(pool.items()),
dict(a=1, b=2, c=3,
d="dval", e="eval", f="fval", g="gval", h="hval"))
assert_equal(pool.running(), 0)
assert not pool.running_keys()
assert_equal(pool.waiting(), 0)
assert_equal(pool.waiting_for("h"), set())
capture.validate([
["h got b", "h got c"],
["f returning fval", "h got f"],
["e returning eval", "g returning gval",
"h got e", "h got g"],
["d returning dval", "h got d", "h returning hval"],
[],
])
def spawn_many_func(key, results, capture, pool):
for k, v in results:
# with a capture.step() at each post(), too complicated to predict
# which results will be delivered when
pass
capture.add("{} done".format(key))
# use post(key) instead of waiting for implicit post() of return value
pool.post(key, key)
capture.step()
spin()
def waitall_done(capture, pool):
pool.waitall()
capture.add("waitall() done")
def test_spawn_many():
# This dependencies dict sets up a graph like this:
# a
# / \
# b c
# \ /|
# d |
# \|
# e
deps = dict(e="cd",
d="bc",
c="a",
b="a",
a="")
capture = Capture()
pool = DAGPool()
# spawn a waitall() waiter externally to our DAGPool, but capture its
# message in same Capture instance
eventlet.spawn(waitall_done, capture, pool)
pool.spawn_many(deps, spawn_many_func, capture, pool)
# This set of greenthreads should in fact run to completion once spawned.
spin()
# verify that e completed (also that post(key) within greenthread
# overrides implicit post of return value, which would be None)
assert_equal(pool.get("e"), "e")
# With the dependency graph shown above, it is not guaranteed whether b or
# c will complete first. Handle either case.
sequence = capture.sequence[:]
sequence[1:3] = [{sequence[1].pop(), sequence[2].pop()}]
assert_equal(sequence,
[{"a done"},
{"b done", "c done"},
{"d done"},
{"e done"},
{"waitall() done"},
])
# deliberately distinguish this from dagpool._MISSING
_notthere = object()
def test_wait_each_all():
# set up a simple linear dependency chain
deps = dict(b="a", c="b", d="c", e="d")
capture = Capture()
pool = DAGPool([("a", "a")])
# capture a different Event for each key
events = {key: eventlet.event.Event() for key in deps.keys()}
# can't use spawn_many() because we need a different event for each
for key, dep in deps.items():
pool.spawn(key, dep, observe, capture, events[key])
keys = "abcde" # this specific order
each = iter(pool.wait_each())
for pos in range(len(keys)):
# next value from wait_each()
k, v = next(each)
assert_equal(k, keys[pos])
# advance every pool greenlet as far as it can go
spin()
# everything from keys[:pos+1] should have a value by now
for k in keys[:pos + 1]:
assert pool.get(k, _notthere) is not _notthere, \
"greenlet {} did not yet produce a value".format(k)
# everything from keys[pos+1:] should not yet
for k in keys[pos + 1:]:
assert pool.get(k, _notthere) is _notthere, \
"wait_each() delayed value for {}".format(keys[pos])
# let next greenthread complete
if pos < len(keys) - 1:
k = keys[pos + 1]
events[k].send(k)
def test_kill():
pool = DAGPool()
# nonexistent key raises KeyError
with assert_raises(KeyError):
pool.kill("a")
# spawn a greenthread
pool.spawn("a", (), lambda key, result: 1)
# kill it before it can even run
pool.kill("a")
# didn't run
spin()
assert_equal(pool.get("a"), None)
# killing it forgets about it
with assert_raises(KeyError):
pool.kill("a")
# so that we can try again
pool.spawn("a", (), lambda key, result: 2)
spin()
# this time it ran to completion, so can no longer be killed
with assert_raises(KeyError):
pool.kill("a")
# verify it ran to completion
assert_equal(pool.get("a"), 2)
def test_post_collision_preload():
pool = DAGPool(dict(a=1))
with assert_raises(Collision):
pool.post("a", 2)
def test_post_collision_post():
pool = DAGPool()
pool.post("a", 1)
with assert_raises(Collision):
pool.post("a", 2)
def test_post_collision_spawn():
pool = DAGPool()
pool.spawn("a", (), lambda key, result: 1)
# hasn't yet run
with assert_raises(Collision):
# n.b. This exercises the code that tests whether post(key) is or is
# not coming from that key's greenthread.
pool.post("a", 2)
# kill it
pool.kill("a")
# now we can post
pool.post("a", 3)
assert_equal(pool.get("a"), 3)
pool = DAGPool()
pool.spawn("a", (), lambda key, result: 4)
# run it
spin()
with assert_raises(Collision):
pool.post("a", 5)
# can't kill it now either
with assert_raises(KeyError):
pool.kill("a")
# still can't post
with assert_raises(Collision):
pool.post("a", 6)
def test_post_replace():
pool = DAGPool()
pool.post("a", 1)
pool.post("a", 2, replace=True)
assert_equal(pool.get("a"), 2)
assert_equal(dict(pool.wait_each("a")), dict(a=2))
assert_equal(pool.wait("a"), dict(a=2))
assert_equal(pool["a"], 2)
def waitfor(capture, pool, key):
value = pool[key]
capture.add("got {}".format(value))
def test_getitem():
capture = Capture()
pool = DAGPool()
eventlet.spawn(waitfor, capture, pool, "a")
# pool["a"] just waiting
capture.validate([[]])
pool.spawn("a", (), lambda key, results: 1)
# still waiting: hasn't yet run
capture.validate([[]])
# run it
spin()
capture.validate([["got 1"]])
| Capture |
python | falconry__falcon | tests/test_request_media.py | {
"start": 6341,
"end": 7847
} | class ____(media.BaseHandler):
def serialize(self, *args, **kwargs):
pass
def deserialize(self, *args, **kwargs):
pass
exhaust_stream = True
def test_complete_consumption(asgi):
client = create_client(asgi, {'nope/nope': NopeHandler()})
body = b'{"something": "abracadabra"}'
headers = {'Content-Type': 'nope/nope'}
assert client.simulate_post('/', body=body, headers=headers).status_code == 200
req_media = client.resource.captured_req_media
assert req_media is None
req_bounded_stream = client.resource.captured_req.bounded_stream
assert req_bounded_stream.eof
@pytest.mark.parametrize('payload', [False, 0, 0.0, '', [], {}])
def test_empty_json_media(asgi, payload):
resource = ResourceCachedMediaAsync() if asgi else ResourceCachedMedia()
client = create_client(asgi, resource=resource)
assert client.simulate_post('/', json=payload).status_code == 200
assert resource.captured_req_media == payload
def test_null_json_media(client):
assert (
client.simulate_post(
'/', body='null', headers={'Content-Type': 'application/json'}
).status_code
== 200
)
assert client.resource.captured_req_media is None
def _create_client_invalid_media(asgi, error_type, handlers=None):
resource_type = ResourceInvalidMediaAsync if asgi else ResourceInvalidMedia
resource = resource_type(error_type)
return create_client(asgi, handlers=handlers, resource=resource)
| NopeHandler |
python | numpy__numpy | numpy/f2py/tests/test_array_from_pyobj.py | {
"start": 6994,
"end": 11086
} | class ____:
def __repr__(self):
return (f'Array({self.type}, {self.dims}, {self.intent},'
f' {self.obj})|arr={self.arr}')
def __init__(self, typ, dims, intent, obj):
self.type = typ
self.dims = dims
self.intent = intent
self.obj_copy = copy.deepcopy(obj)
self.obj = obj
# arr.dtypechar may be different from typ.dtypechar
self.arr = wrap.call(typ.type_num,
typ.elsize,
dims, intent.flags, obj)
assert isinstance(self.arr, np.ndarray)
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims) > 1:
if self.intent.is_intent("c"):
assert (intent.flags & wrap.F2PY_INTENT_C)
assert not self.arr.flags["FORTRAN"]
assert self.arr.flags["CONTIGUOUS"]
assert (not self.arr_attr[6] & wrap.FORTRAN)
else:
assert (not intent.flags & wrap.F2PY_INTENT_C)
assert self.arr.flags["FORTRAN"]
assert not self.arr.flags["CONTIGUOUS"]
assert (self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
self.pyarr = None
self.pyarr_attr = None
return
if intent.is_intent("cache"):
assert isinstance(obj, np.ndarray), repr(type(obj))
self.pyarr = np.array(obj).reshape(*dims).copy()
else:
self.pyarr = np.array(
np.array(obj, dtype=typ.dtypechar).reshape(*dims),
order=(self.intent.is_intent("c") and "C") or "F",
)
assert self.pyarr.dtype == typ
self.pyarr.setflags(write=self.arr.flags["WRITEABLE"])
assert self.pyarr.flags["OWNDATA"], (obj, intent)
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims) > 1:
if self.intent.is_intent("c"):
assert not self.pyarr.flags["FORTRAN"]
assert self.pyarr.flags["CONTIGUOUS"]
assert (not self.pyarr_attr[6] & wrap.FORTRAN)
else:
assert self.pyarr.flags["FORTRAN"]
assert not self.pyarr.flags["CONTIGUOUS"]
assert (self.pyarr_attr[6] & wrap.FORTRAN)
assert self.arr_attr[1] == self.pyarr_attr[1] # nd
assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions
if self.arr_attr[1] <= 1:
assert self.arr_attr[3] == self.pyarr_attr[3], repr((
self.arr_attr[3],
self.pyarr_attr[3],
self.arr.tobytes(),
self.pyarr.tobytes(),
)) # strides
assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr((
self.arr_attr[5], self.pyarr_attr[5]
)) # descr
assert self.arr_attr[6] == self.pyarr_attr[6], repr((
self.arr_attr[6],
self.pyarr_attr[6],
flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
flags2names(self.arr_attr[6]),
intent,
)) # flags
if intent.is_intent("cache"):
assert self.arr_attr[5][3] >= self.type.elsize
else:
assert self.arr_attr[5][3] == self.type.elsize
assert (self.arr_equal(self.pyarr, self.arr))
if isinstance(self.obj, np.ndarray):
if typ.elsize == Type(obj.dtype).elsize:
if not intent.is_intent("copy") and self.arr_attr[1] <= 1:
assert self.has_shared_memory()
def arr_equal(self, arr1, arr2):
if arr1.shape != arr2.shape:
return False
return (arr1 == arr2).all()
def __str__(self):
return str(self.arr)
def has_shared_memory(self):
"""Check that created array shares data with input array."""
if self.obj is self.arr:
return True
if not isinstance(self.obj, np.ndarray):
return False
obj_attr = wrap.array_attrs(self.obj)
return obj_attr[0] == self.arr_attr[0]
| Array |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dags.py | {
"start": 43197,
"end": 45897
} | class ____(TestDagEndpoint):
"""Unit tests for Get DAG."""
@pytest.mark.parametrize(
("query_params", "dag_id", "expected_status_code", "dag_display_name", "expected_tags"),
[
({}, "fake_dag_id", 404, "fake_dag", []),
({}, DAG2_ID, 200, DAG2_ID, []),
],
)
def test_get_dag(
self, test_client, query_params, dag_id, expected_status_code, dag_display_name, expected_tags
):
response = test_client.get(f"/dags/{dag_id}", params=query_params)
assert response.status_code == expected_status_code
if expected_status_code != 200:
return
# Match expected and actual responses below.
res_json = response.json()
last_parsed_time = res_json["last_parsed_time"]
last_parse_duration = res_json["last_parse_duration"]
file_token = res_json["file_token"]
tags = res_json.get("tags", [])
assert len(tags) == len(expected_tags)
for tag in tags:
assert tag["name"] in expected_tags
assert tag["dag_id"] == dag_id
assert tag["dag_display_name"] == dag_display_name
expected = {
"dag_id": dag_id,
"dag_display_name": dag_display_name,
"description": None,
"fileloc": __file__,
"file_token": file_token,
"is_paused": False,
"is_stale": False,
"owners": ["airflow"],
"timetable_summary": None,
"tags": tags,
"has_task_concurrency_limits": True,
"next_dagrun_data_interval_start": None,
"next_dagrun_data_interval_end": None,
"next_dagrun_logical_date": None,
"next_dagrun_run_after": None,
"max_active_runs": 16,
"max_consecutive_failed_dag_runs": 0,
"last_expired": None,
"max_active_tasks": 16,
"last_parsed_time": last_parsed_time,
"last_parse_duration": last_parse_duration,
"timetable_description": "Never, external triggers only",
"has_import_errors": False,
"bundle_name": "dag_maker",
"bundle_version": None,
"relative_fileloc": "test_dags.py",
}
assert res_json == expected
def test_get_dag_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(f"/dags/{DAG1_ID}")
assert response.status_code == 401
def test_get_dag_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(f"/dags/{DAG1_ID}")
assert response.status_code == 403
| TestGetDag |
python | sympy__sympy | sympy/printing/repr.py | {
"start": 468,
"end": 11376
} | class ____(Printer):
printmethod = "_sympyrepr"
_default_settings: dict[str, Any] = {
"order": None,
"perm_cyclic" : True,
}
def reprify(self, args, sep):
"""
Prints each item in `args` and joins them with `sep`.
"""
return sep.join([self.doprint(item) for item in args])
def emptyPrinter(self, expr):
"""
The fallback printer.
"""
if isinstance(expr, str):
return expr
elif hasattr(expr, "__srepr__"):
return expr.__srepr__()
elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
l = []
for o in expr.args:
l.append(self._print(o))
return expr.__class__.__name__ + '(%s)' % ', '.join(l)
elif hasattr(expr, "__module__") and hasattr(expr, "__name__"):
return "<'%s.%s'>" % (expr.__module__, expr.__name__)
else:
return str(expr)
def _print_Add(self, expr, order=None):
args = Add.make_args(expr)
args = map(self._print, args)
clsname = type(expr).__name__
return clsname + "(%s)" % ", ".join(args)
def _print_Cycle(self, expr):
return expr.__repr__()
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
from sympy.utilities.exceptions import sympy_deprecation_warning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=7,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
if not expr.size:
return 'Permutation()'
# before taking Cycle notation, see if the last element is
# a singleton and move it to the head of the string
s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):]
last = s.rfind('(')
if not last == 0 and ',' not in s[last:]:
s = s[last:] + s[:last]
return 'Permutation%s' %s
else:
s = expr.support()
if not s:
if expr.size < 5:
return 'Permutation(%s)' % str(expr.array_form)
return 'Permutation([], size=%s)' % expr.size
trim = str(expr.array_form[:s[-1] + 1]) + ', size=%s' % expr.size
use = full = str(expr.array_form)
if len(trim) < len(full):
use = trim
return 'Permutation(%s)' % use
def _print_Function(self, expr):
r = self._print(expr.func)
r += '(%s)' % ', '.join([self._print(a) for a in expr.args])
return r
def _print_Heaviside(self, expr):
# Same as _print_Function but uses pargs to suppress default value for
# 2nd arg.
r = self._print(expr.func)
r += '(%s)' % ', '.join([self._print(a) for a in expr.pargs])
return r
def _print_FunctionClass(self, expr):
if issubclass(expr, AppliedUndef):
return 'Function(%r)' % (expr.__name__)
else:
return expr.__name__
def _print_Half(self, expr):
return 'Rational(1, 2)'
def _print_RationalConstant(self, expr):
return str(expr)
def _print_AtomicExpr(self, expr):
return str(expr)
def _print_NumberSymbol(self, expr):
return str(expr)
def _print_Integer(self, expr):
return 'Integer(%i)' % expr.p
def _print_Complexes(self, expr):
return 'Complexes'
def _print_Integers(self, expr):
return 'Integers'
def _print_Naturals(self, expr):
return 'Naturals'
def _print_Naturals0(self, expr):
return 'Naturals0'
def _print_Rationals(self, expr):
return 'Rationals'
def _print_Reals(self, expr):
return 'Reals'
def _print_EmptySet(self, expr):
return 'EmptySet'
def _print_UniversalSet(self, expr):
return 'UniversalSet'
def _print_EmptySequence(self, expr):
return 'EmptySequence'
def _print_list(self, expr):
return "[%s]" % self.reprify(expr, ", ")
def _print_dict(self, expr):
sep = ", "
dict_kvs = ["%s: %s" % (self.doprint(key), self.doprint(value)) for key, value in expr.items()]
return "{%s}" % sep.join(dict_kvs)
def _print_set(self, expr):
if not expr:
return "set()"
return "{%s}" % self.reprify(expr, ", ")
def _print_MatrixBase(self, expr):
# special case for some empty matrices
if (expr.rows == 0) ^ (expr.cols == 0):
return '%s(%s, %s, %s)' % (expr.__class__.__name__,
self._print(expr.rows),
self._print(expr.cols),
self._print([]))
l = []
for i in range(expr.rows):
l.append([])
for j in range(expr.cols):
l[-1].append(expr[i, j])
return '%s(%s)' % (expr.__class__.__name__, self._print(l))
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_NaN(self, expr):
return "nan"
def _print_Mul(self, expr, order=None):
args = Mul.make_args(expr)
args = map(self._print, args)
clsname = type(expr).__name__
return clsname + "(%s)" % ", ".join(args)
def _print_Rational(self, expr):
return 'Rational(%s, %s)' % (self._print(expr.p), self._print(expr.q))
def _print_PythonRational(self, expr):
return "%s(%d, %d)" % (expr.__class__.__name__, expr.p, expr.q)
def _print_Fraction(self, expr):
return 'Fraction(%s, %s)' % (self._print(expr.numerator), self._print(expr.denominator))
def _print_Float(self, expr):
r = mlib_to_str(expr._mpf_, repr_dps(expr._prec))
return "%s('%s', precision=%i)" % (expr.__class__.__name__, r, expr._prec)
def _print_Sum2(self, expr):
return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
self._print(expr.a), self._print(expr.b))
def _print_Str(self, s):
return "%s(%s)" % (s.__class__.__name__, self._print(s.name))
def _print_Symbol(self, expr):
d = expr._assumptions_orig
# print the dummy_index like it was an assumption
if expr.is_Dummy:
d = d.copy()
d['dummy_index'] = expr.dummy_index
if d == {}:
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
else:
attr = ['%s=%s' % (k, v) for k, v in d.items()]
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.name), ', '.join(attr))
def _print_CoordinateSymbol(self, expr):
d = expr._assumptions.generator
if d == {}:
return "%s(%s, %s)" % (
expr.__class__.__name__,
self._print(expr.coord_sys),
self._print(expr.index)
)
else:
attr = ['%s=%s' % (k, v) for k, v in d.items()]
return "%s(%s, %s, %s)" % (
expr.__class__.__name__,
self._print(expr.coord_sys),
self._print(expr.index),
', '.join(attr)
)
def _print_Predicate(self, expr):
return "Q.%s" % expr.name
def _print_AppliedPredicate(self, expr):
# will be changed to just expr.args when args overriding is removed
args = expr._args
return "%s(%s)" % (expr.__class__.__name__, self.reprify(args, ", "))
def _print_str(self, expr):
return repr(expr)
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.reprify(expr, ", ")
def _print_WildFunction(self, expr):
return "%s('%s')" % (expr.__class__.__name__, expr.name)
def _print_AlgebraicNumber(self, expr):
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.root), self._print(expr.coeffs()))
def _print_PolyRing(self, ring):
return "%s(%s, %s, %s)" % (ring.__class__.__name__,
self._print(ring.symbols), self._print(ring.domain), self._print(ring.order))
def _print_FracField(self, field):
return "%s(%s, %s, %s)" % (field.__class__.__name__,
self._print(field.symbols), self._print(field.domain), self._print(field.order))
def _print_PolyElement(self, poly):
terms = list(poly.terms())
terms.sort(key=poly.ring.order, reverse=True)
return "%s(%s, %s)" % (poly.__class__.__name__, self._print(poly.ring), self._print(terms))
def _print_FracElement(self, frac):
numer_terms = list(frac.numer.terms())
numer_terms.sort(key=frac.field.order, reverse=True)
denom_terms = list(frac.denom.terms())
denom_terms.sort(key=frac.field.order, reverse=True)
numer = self._print(numer_terms)
denom = self._print(denom_terms)
return "%s(%s, %s, %s)" % (frac.__class__.__name__, self._print(frac.field), numer, denom)
def _print_FractionField(self, domain):
cls = domain.__class__.__name__
field = self._print(domain.field)
return "%s(%s)" % (cls, field)
def _print_PolynomialRingBase(self, ring):
cls = ring.__class__.__name__
dom = self._print(ring.domain)
gens = ', '.join(map(self._print, ring.gens))
order = str(ring.order)
if order != ring.default_order:
orderstr = ", order=" + order
else:
orderstr = ""
return "%s(%s, %s%s)" % (cls, dom, gens, orderstr)
def _print_DMP(self, p):
cls = p.__class__.__name__
rep = self._print(p.to_list())
dom = self._print(p.dom)
return "%s(%s, %s)" % (cls, rep, dom)
def _print_MonogenicFiniteExtension(self, ext):
# The expanded tree shown by srepr(ext.modulus)
# is not practical.
return "FiniteExtension(%s)" % str(ext.modulus)
def _print_ExtensionElement(self, f):
rep = self._print(f.rep)
ext = self._print(f.ext)
return "ExtElem(%s, %s)" % (rep, ext)
@print_function(ReprPrinter)
def srepr(expr, **settings):
"""return expr in repr form"""
return ReprPrinter(settings).doprint(expr)
| ReprPrinter |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/as_numpy_iterator_test.py | {
"start": 1368,
"end": 5598
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testBasic(self):
ds = dataset_ops.Dataset.range(3)
self.assertEqual([0, 1, 2], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.eager_only_combinations())
def testImmutable(self):
ds = dataset_ops.Dataset.from_tensors([1, 2, 3])
arr = next(ds.as_numpy_iterator())
with self.assertRaisesRegex(ValueError,
'assignment destination is read-only'):
arr[0] = 0
@combinations.generate(test_base.eager_only_combinations())
def testNestedStructure(self):
point = collections.namedtuple('Point', ['x', 'y'])
ds = dataset_ops.Dataset.from_tensor_slices({
'a': ([1, 2], [3, 4]),
'b': [5, 6],
'c': point([7, 8], [9, 10])
})
self.assertEqual([{
'a': (1, 3),
'b': 5,
'c': point(7, 9)
}, {
'a': (2, 4),
'b': 6,
'c': point(8, 10)
}], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.graph_only_combinations())
def testNonEager(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaises(RuntimeError):
ds.as_numpy_iterator()
def _testInvalidElement(self, element):
ds = dataset_ops.Dataset.from_tensors(element)
with self.assertRaisesRegex(TypeError,
'is not supported for datasets that'):
ds.as_numpy_iterator()
@combinations.generate(test_base.eager_only_combinations())
def testSparseElement(self):
st = sparse_tensor.SparseTensor(
indices=[(0, 0), (1, 1), (2, 2)], values=[1, 2, 3], dense_shape=(3, 3))
ds = dataset_ops.Dataset.from_tensor_slices(st)
dt = sparse_ops.sparse_tensor_to_dense(st)
self.assertAllEqual(list(ds.as_numpy_iterator()), dt.numpy())
@combinations.generate(test_base.eager_only_combinations())
def testRaggedElement(self):
lst = [[1, 2], [3], [4, 5, 6]]
rt = ragged_factory_ops.constant([lst])
# This dataset consists of exactly one ragged tensor.
ds = dataset_ops.Dataset.from_tensor_slices(rt)
expected = np.array([
np.array([1, 2], dtype=np.int32),
np.array([3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32)
], dtype=object)
for actual in ds.as_numpy_iterator():
self.assertEqual(len(actual), len(expected))
for actual_arr, expected_arr in zip(actual, expected):
self.assertTrue(np.array_equal(actual_arr, expected_arr),
f'{actual_arr} != {expected_arr}')
@combinations.generate(test_base.eager_only_combinations())
def testDatasetElement(self):
self._testInvalidElement(dataset_ops.Dataset.range(3))
@combinations.generate(test_base.eager_only_combinations())
def testNestedNonTensorElement(self):
tuple_elem = (constant_op.constant([1, 2, 3]), dataset_ops.Dataset.range(3))
self._testInvalidElement(tuple_elem)
@combinations.generate(test_base.eager_only_combinations())
def testNoneElement(self):
ds = dataset_ops.Dataset.from_tensors((2, None))
self.assertAllEqual(list(ds.as_numpy_iterator()), [(2, None)])
@combinations.generate(combinations.times(
test_base.eager_only_combinations(),
combinations.combine(enable_async_ckpt=[True, False])
))
def testCompatibleWithCheckpoint(self, enable_async_ckpt):
ds = dataset_ops.Dataset.range(10)
iterator = ds.as_numpy_iterator()
ckpt = trackable_utils.Checkpoint(iterator=iterator)
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_enable_async_checkpoint=enable_async_ckpt)
for _ in range(5):
next(iterator)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = ckpt.save(prefix, options=ckpt_options)
self.assertEqual(5, next(iterator))
self.assertEqual(6, next(iterator))
restore_iter = ds.as_numpy_iterator()
restore_ckpt = trackable_utils.Checkpoint(iterator=restore_iter)
if enable_async_ckpt:
ckpt.sync() # Otherwise save may not finish yet
restore_ckpt.restore(save_path)
self.assertEqual(5, next(restore_iter))
if __name__ == '__main__':
test.main()
| AsNumpyIteratorTest |
python | realpython__materials | inheritance-and-composition/inheritance/employees.py | {
"start": 1056,
"end": 1276
} | class ____(Employee, SecretaryRole, HourlyPolicy):
def __init__(self, id, name, hours_worked, hour_rate):
HourlyPolicy.__init__(self, hours_worked, hour_rate)
super().__init__(id, name)
| TemporarySecretary |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py | {
"start": 1402,
"end": 1458
} | class ____(ImportError):
pass
| AnsibleModuleImportError |
python | getsentry__sentry | src/sentry/integrations/slack/unfurl/types.py | {
"start": 616,
"end": 692
} | class ____(NamedTuple):
url: str
args: Mapping[str, Any]
| UnfurlableUrl |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/base.py | {
"start": 1686,
"end": 3241
} | class ____(BaseModel):
_prediction_type: ClassVar[Type[BaseLlamaExamplePrediction]]
predictions: List[BaseLlamaExamplePrediction] = Field(
default_factory=list, description="Predictions on train_examples."
)
def __getitem__(
self, val: Union[slice, int]
) -> Union[Sequence[BaseLlamaExamplePrediction], BaseLlamaExamplePrediction]:
"""
Enable slicing and indexing.
Returns the desired slice on `predictions`.
"""
return self.predictions[val]
@abstractmethod
def to_pandas(self) -> Any:
"""Create pandas dataframe."""
def save_json(self, path: str) -> None:
"""Save json."""
with open(path, "w") as f:
predictions = None
if self.predictions:
predictions = [
self._prediction_type.model_dump(el) for el in self.predictions
]
data = {
"predictions": predictions,
}
json.dump(data, f, indent=4)
@classmethod
def from_json(cls, path: str) -> "BaseLlamaPredictionDataset":
"""Load json."""
with open(path) as f:
data = json.load(f)
predictions = [
cls._prediction_type.model_validate(el) for el in data["predictions"]
]
return cls(
predictions=predictions,
)
@property
@abstractmethod
def class_name(self) -> str:
"""Class name."""
return "BaseLlamaPredictionDataset"
| BaseLlamaPredictionDataset |
python | pypa__warehouse | tests/unit/legacy/api/test_json.py | {
"start": 17669,
"end": 29067
} | class ____:
def test_normalizing_redirects(self, db_request):
release = ReleaseFactory.create(version="3.0")
db_request.matchdict = {
"name": release.project.name.swapcase(),
"version": "3.0",
}
db_request.current_route_path = pretend.call_recorder(
lambda name: "/project/the-redirect/3.0/"
)
resp = json.json_release(release, db_request)
assert isinstance(resp, HTTPMovedPermanently)
assert resp.headers["Location"] == "/project/the-redirect/3.0/"
_assert_has_cors_headers(resp.headers)
assert db_request.current_route_path.calls == [
pretend.call(name=release.project.normalized_name)
]
def test_detail_renders(self, pyramid_config, db_request, db_session):
project = ProjectFactory.create(has_docs=True)
description_content_type = "text/x-rst"
url = "/the/fake/url/"
project_urls = [
"url," + url,
"Homepage,https://example.com/home2/",
"Source Code,https://example.com/source-code/",
"uri,http://john.doe@www.example.com:123/forum/questions/?tag=networking&order=newest#top", # noqa: E501
"ldap,ldap://[2001:db8::7]/c=GB?objectClass?one",
"tel,tel:+1-816-555-1212",
"telnet,telnet://192.0.2.16:80/",
"urn,urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
"reservedchars,http://example.com?&$+/:;=@#", # Commas don't work!
r"unsafechars,http://example.com <>[]{}|\^%",
]
expected_urls = []
for project_url in sorted(
project_urls, key=lambda u: u.split(",", 1)[0].strip().lower()
):
expected_urls.append(tuple(project_url.split(",", 1)))
expected_urls = dict(tuple(expected_urls))
releases = [
ReleaseFactory.create(project=project, version=v)
for v in ["0.1", "1.0", "2.0"]
]
releases += [
ReleaseFactory.create(
project=project,
version="3.0",
description=DescriptionFactory.create(
content_type=description_content_type
),
dynamic=["Platform", "Supported-Platform"],
provides_extra=["testing", "plugin"],
)
]
for urlspec in project_urls:
label, _, purl = urlspec.partition(",")
db_session.add(
ReleaseURL(
release=releases[-1],
name=label.strip(),
url=purl.strip(),
)
)
files = [
FileFactory.create(
release=r,
filename=f"{project.name}-{r.version}.tar.gz",
python_version="source",
size=200,
)
for r in releases[1:]
]
user = UserFactory.create()
JournalEntryFactory.reset_sequence()
je = JournalEntryFactory.create(name=project.name, submitted_by=user)
db_request.route_url = pretend.call_recorder(lambda *args, **kw: url)
db_request.matchdict = {
"name": project.normalized_name,
"version": "3.0",
}
result = json.json_release(releases[-1], db_request)
assert set(db_request.route_url.calls) == {
pretend.call("packaging.file", path=files[-1].path),
pretend.call("packaging.project", name=project.name),
pretend.call(
"packaging.release", name=project.name, version=releases[-1].version
),
pretend.call("legacy.docs", project=project.name),
}
_assert_has_cors_headers(db_request.response.headers)
assert db_request.response.headers["X-PyPI-Last-Serial"] == str(je.id)
assert result == {
"info": {
"author": None,
"author_email": None,
"bugtrack_url": None,
"classifiers": [],
"description_content_type": description_content_type,
"description": releases[-1].description.raw,
"docs_url": "/the/fake/url/",
"download_url": None,
"downloads": {"last_day": -1, "last_week": -1, "last_month": -1},
"dynamic": ["Platform", "Supported-Platform"],
"home_page": None,
"keywords": None,
"license": None,
"license_expression": None,
"license_files": None,
"maintainer": None,
"maintainer_email": None,
"name": project.name,
"package_url": "/the/fake/url/",
"platform": None,
"project_url": "/the/fake/url/",
"project_urls": expected_urls,
"provides_extra": ["testing", "plugin"],
"release_url": "/the/fake/url/",
"requires_dist": None,
"requires_python": None,
"summary": None,
"yanked": False,
"yanked_reason": None,
"version": "3.0",
},
"urls": [
{
"comment_text": None,
"downloads": -1,
"filename": files[-1].filename,
"has_sig": False,
"md5_digest": files[-1].md5_digest,
"digests": {
"md5": files[-1].md5_digest,
"sha256": files[-1].sha256_digest,
"blake2b_256": files[-1].blake2_256_digest,
},
"packagetype": files[-1].packagetype,
"python_version": "source",
"size": 200,
"upload_time": files[-1].upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
"upload_time_iso_8601": files[-1].upload_time.isoformat() + "Z",
"url": "/the/fake/url/",
"requires_python": None,
"yanked": False,
"yanked_reason": None,
}
],
"last_serial": je.id,
"vulnerabilities": [],
}
def test_minimal_renders(self, pyramid_config, db_request):
project = ProjectFactory.create(has_docs=False)
release = ReleaseFactory.create(project=project, version="0.1")
file = FileFactory.create(
release=release,
filename=f"{project.name}-{release.version}.tar.gz",
python_version="source",
size=200,
)
user = UserFactory.create()
JournalEntryFactory.reset_sequence()
je = JournalEntryFactory.create(name=project.name, submitted_by=user)
url = "/the/fake/url/"
db_request.route_url = pretend.call_recorder(lambda *args, **kw: url)
db_request.matchdict = {
"name": project.normalized_name,
"version": release.canonical_version,
}
result = json.json_release(release, db_request)
assert set(db_request.route_url.calls) == {
pretend.call("packaging.file", path=file.path),
pretend.call("packaging.project", name=project.name),
pretend.call(
"packaging.release", name=project.name, version=release.version
),
}
_assert_has_cors_headers(db_request.response.headers)
assert db_request.response.headers["X-PyPI-Last-Serial"] == str(je.id)
assert result == {
"info": {
"author": None,
"author_email": None,
"bugtrack_url": None,
"classifiers": [],
"description_content_type": release.description.content_type,
"description": release.description.raw,
"docs_url": None,
"download_url": None,
"downloads": {"last_day": -1, "last_week": -1, "last_month": -1},
"dynamic": None,
"home_page": None,
"keywords": None,
"license": None,
"license_expression": None,
"license_files": None,
"maintainer": None,
"maintainer_email": None,
"name": project.name,
"package_url": "/the/fake/url/",
"platform": None,
"project_url": "/the/fake/url/",
"project_urls": None,
"provides_extra": None,
"release_url": "/the/fake/url/",
"requires_dist": None,
"requires_python": None,
"summary": None,
"yanked": False,
"yanked_reason": None,
"version": "0.1",
},
"urls": [
{
"comment_text": None,
"downloads": -1,
"filename": file.filename,
"has_sig": False,
"md5_digest": file.md5_digest,
"digests": {
"md5": file.md5_digest,
"sha256": file.sha256_digest,
"blake2b_256": file.blake2_256_digest,
},
"packagetype": file.packagetype,
"python_version": "source",
"size": 200,
"upload_time": file.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
"upload_time_iso_8601": file.upload_time.isoformat() + "Z",
"url": "/the/fake/url/",
"requires_python": None,
"yanked": False,
"yanked_reason": None,
}
],
"last_serial": je.id,
"vulnerabilities": [],
}
@pytest.mark.parametrize("withdrawn", [None, "2022-06-28T16:39:06Z"])
def test_vulnerabilities_renders(self, pyramid_config, db_request, withdrawn):
project = ProjectFactory.create(has_docs=False)
release = ReleaseFactory.create(project=project, version="0.1")
VulnerabilityRecordFactory.create(
id="PYSEC-001",
source="the source",
link="the link",
aliases=["alias1", "alias2"],
details="some details",
summary="some summary",
fixed_in=["3.3.2"],
releases=[release],
withdrawn=withdrawn,
)
url = "/the/fake/url/"
db_request.route_url = pretend.call_recorder(lambda *args, **kw: url)
db_request.matchdict = {
"name": project.normalized_name,
"version": release.canonical_version,
}
result = json.json_release(release, db_request)
assert result["vulnerabilities"] == [
{
"id": "PYSEC-001",
"source": "the source",
"link": "the link",
"aliases": ["alias1", "alias2"],
"details": "some details",
"summary": "some summary",
"fixed_in": ["3.3.2"],
"withdrawn": withdrawn,
},
]
| TestJSONRelease |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_type_annotation.py | {
"start": 1103,
"end": 1218
} | class ____:
taint_1: int = 0
taint_2: int = 0
no_taint_1: List[int] = []
no_taint_2: str = ""
| Test7_C |
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 3289,
"end": 5055
} | class ____(datetime.tzinfo):
"""
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0, represented
as a :py:class:`datetime.timedelta` object).
"""
def __init__(self, name, offset):
self._name = name
try:
# Allow a timedelta
offset = offset.total_seconds()
except (TypeError, AttributeError):
pass
self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
@_validate_fromutc_inputs
def fromutc(self, dt):
return dt + self._offset
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
int(self._offset.total_seconds()))
__reduce__ = object.__reduce__
| tzoffset |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_metric_alert_registry_handlers.py | {
"start": 1794,
"end": 2236
} | class ____(BaseMetricAlertHandler):
@classmethod
def send_alert(
cls,
notification_context: NotificationContext,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
trigger_status: TriggerStatus,
notification_uuid: str,
organization: Organization,
project: Project,
) -> None:
pass
| TestHandler |
python | astropy__astropy | astropy/timeseries/core.py | {
"start": 1254,
"end": 3526
} | class ____(QTable):
_required_columns = None
_required_columns_enabled = True
# If _required_column_relax is True, we don't require the columns to be
# present but we do require them to be the correct ones IF present. Note
# that this is a temporary state - as soon as the required columns
# are all present, we toggle this to False
_required_columns_relax = False
def _check_required_columns(self):
def as_scalar_or_list_str(obj):
if not hasattr(obj, "__len__"):
return f"'{obj}'"
elif len(obj) == 1:
return f"'{obj[0]}'"
else:
return str(obj)
if not self._required_columns_enabled:
return
if self._required_columns is not None:
if self._required_columns_relax:
required_columns = self._required_columns[: len(self.colnames)]
else:
required_columns = self._required_columns
plural = "s" if len(required_columns) > 1 else ""
if not self._required_columns_relax and len(self.colnames) == 0:
raise ValueError(
f"{self.__class__.__name__} object is invalid - expected"
f" '{required_columns[0]}' as the first column{plural} but time"
" series has no columns"
)
elif self.colnames[: len(required_columns)] != required_columns:
raise ValueError(
f"{self.__class__.__name__} object is invalid - expected"
f" {as_scalar_or_list_str(required_columns)} as the first"
f" column{plural} but found"
f" {as_scalar_or_list_str(self.colnames[: len(required_columns)])}"
)
if (
self._required_columns_relax
and self._required_columns
== self.colnames[: len(self._required_columns)]
):
self._required_columns_relax = False
@contextmanager
def _delay_required_column_checks(self):
self._required_columns_enabled = False
yield
self._required_columns_enabled = True
self._check_required_columns()
| BaseTimeSeries |
python | neetcode-gh__leetcode | python/0231-power-of-two.py | {
"start": 176,
"end": 300
} | class ____:
def isPowerOfTwo(self, n: int) -> bool:
return n > 0 and (n & (n - 1)) == 0
# Bit manipulation
| Solution |
python | openai__openai-python | src/openai/types/beta/chatkit/thread_delete_response.py | {
"start": 198,
"end": 483
} | class ____(BaseModel):
id: str
"""Identifier of the deleted thread."""
deleted: bool
"""Indicates that the thread has been deleted."""
object: Literal["chatkit.thread.deleted"]
"""Type discriminator that is always `chatkit.thread.deleted`."""
| ThreadDeleteResponse |
python | django__django | tests/postgres_tests/test_search.py | {
"start": 18087,
"end": 23098
} | class ____(GrailTestData, PostgreSQLTestCase):
def test_ranking(self):
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank(
SearchVector("dialogue"), SearchQuery("brave sir robin")
),
)
.order_by("rank")
)
self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0])
def test_rank_passing_untyped_args(self):
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank("dialogue", "brave sir robin"),
)
.order_by("rank")
)
self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0])
def test_weights_in_vector(self):
vector = SearchVector("dialogue", weight="A") + SearchVector(
"character__name", weight="D"
)
searched = (
Line.objects.filter(scene=self.witch_scene)
.annotate(
rank=SearchRank(vector, SearchQuery("witch")),
)
.order_by("-rank")[:2]
)
self.assertSequenceEqual(searched, [self.crowd, self.witch])
vector = SearchVector("dialogue", weight="D") + SearchVector(
"character__name", weight="A"
)
searched = (
Line.objects.filter(scene=self.witch_scene)
.annotate(
rank=SearchRank(vector, SearchQuery("witch")),
)
.order_by("-rank")[:2]
)
self.assertSequenceEqual(searched, [self.witch, self.crowd])
def test_ranked_custom_weights(self):
vector = SearchVector("dialogue", weight="D") + SearchVector(
"character__name", weight="A"
)
weights = [1.0, 0.0, 0.0, 0.5]
searched = (
Line.objects.filter(scene=self.witch_scene)
.annotate(
rank=SearchRank(vector, SearchQuery("witch"), weights=weights),
)
.order_by("-rank")[:2]
)
self.assertSequenceEqual(searched, [self.crowd, self.witch])
def test_ranking_chaining(self):
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank(
SearchVector("dialogue"), SearchQuery("brave sir robin")
),
)
.filter(rank__gt=0.3)
)
self.assertSequenceEqual(searched, [self.verse0])
def test_cover_density_ranking(self):
not_dense_verse = Line.objects.create(
scene=self.robin,
character=self.minstrel,
dialogue=(
"Bravely taking to his feet, he beat a very brave retreat. "
"A brave retreat brave Sir Robin."
),
)
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank(
SearchVector("dialogue"),
SearchQuery("brave robin"),
cover_density=True,
),
)
.order_by("rank", "-pk")
)
self.assertSequenceEqual(
searched,
[self.verse2, not_dense_verse, self.verse1, self.verse0],
)
def test_ranking_with_normalization(self):
short_verse = Line.objects.create(
scene=self.robin,
character=self.minstrel,
dialogue="A brave retreat brave Sir Robin.",
)
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank(
SearchVector("dialogue"),
SearchQuery("brave sir robin"),
# Divide the rank by the document length.
normalization=2,
),
)
.order_by("rank")
)
self.assertSequenceEqual(
searched,
[self.verse2, self.verse1, self.verse0, short_verse],
)
def test_ranking_with_masked_normalization(self):
short_verse = Line.objects.create(
scene=self.robin,
character=self.minstrel,
dialogue="A brave retreat brave Sir Robin.",
)
searched = (
Line.objects.filter(character=self.minstrel)
.annotate(
rank=SearchRank(
SearchVector("dialogue"),
SearchQuery("brave sir robin"),
# Divide the rank by the document length and by the number
# of unique words in document.
normalization=Value(2).bitor(Value(8)),
),
)
.order_by("rank")
)
self.assertSequenceEqual(
searched,
[self.verse2, self.verse1, self.verse0, short_verse],
)
| TestRankingAndWeights |
python | lazyprogrammer__machine_learning_examples | svm_class/svm_gradient.py | {
"start": 1346,
"end": 4775
} | class ____:
def __init__(self, kernel, C=1.0):
self.kernel = kernel
self.C = C
def _train_objective(self):
return np.sum(self.alphas) - 0.5 * np.sum(self.YYK * np.outer(self.alphas, self.alphas))
def fit(self, X, Y, lr=1e-5, n_iters=400):
# we need these to make future predictions
self.Xtrain = X
self.Ytrain = Y
self.N = X.shape[0]
self.alphas = np.random.random(self.N)
self.b = 0
# kernel matrix
self.K = self.kernel(X, X)
self.YY = np.outer(Y, Y)
self.YYK = self.K * self.YY
# gradient ascent
losses = []
for _ in range(n_iters):
loss = self._train_objective()
losses.append(loss)
grad = np.ones(self.N) - self.YYK.dot(self.alphas)
self.alphas += lr * grad
# clip
self.alphas[self.alphas < 0] = 0
self.alphas[self.alphas > self.C] = self.C
# distrbution of bs
idx = np.where((self.alphas) > 0 & (self.alphas < self.C))[0]
bs = Y[idx] - (self.alphas * Y).dot(self.kernel(X, X[idx]))
self.b = np.mean(bs)
plt.plot(losses)
plt.title("loss per iteration")
plt.show()
def _decision_function(self, X):
return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X)) + self.b
def predict(self, X):
return np.sign(self._decision_function(X))
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
def medical():
data = load_breast_cancer()
X, Y = data.data, data.target
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, rbf, 1e-3, 200
def medical_sigmoid():
data = load_breast_cancer()
X, Y = data.data, data.target
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, sigmoid, 1e-3, 200
def xor():
X, Y = get_xor()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=5.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300
def donut():
X, Y = get_donut()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=5.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300
def spiral():
X, Y = get_spiral()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=5.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300
def clouds():
X, Y = get_clouds()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-5, 400
if __name__ == '__main__':
Xtrain, Xtest, Ytrain, Ytest, kernel, lr, n_iters = spiral()
print("Possible labels:", set(Ytrain))
# make sure the targets are (-1, +1)
Ytrain[Ytrain == 0] = -1
Ytest[Ytest == 0] = -1
# scale the data
scaler = StandardScaler()
Xtrain = scaler.fit_transform(Xtrain)
Xtest = scaler.transform(Xtest)
# now we'll use our custom implementation
model = SVM(kernel=kernel, C=1.0)
t0 = datetime.now()
model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters)
print("train duration:", datetime.now() - t0)
t0 = datetime.now()
print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0)
t0 = datetime.now()
print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0)
if Xtrain.shape[1] == 2:
plot_decision_boundary(model)
| SVM |
python | apache__airflow | providers/apprise/tests/unit/apprise/notifications/test_apprise.py | {
"start": 1065,
"end": 4906
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
extra = {"config": {"path": "http://some_path_that_dont_exist/", "tag": "alert"}}
create_connection_without_db(
Connection(
conn_id="apprise_default",
conn_type="apprise",
extra=extra,
)
)
@mock.patch("airflow.providers.apprise.notifications.apprise.AppriseHook")
def test_notifier(self, mock_apprise_hook, create_dag_without_db):
notifier = send_apprise_notification(body="DISK at 99%", notify_type=NotifyType.FAILURE)
notifier({"dag": create_dag_without_db("test_notifier")})
call_args = mock_apprise_hook.return_value.notify.call_args.kwargs
assert call_args == {
"body": "DISK at 99%",
"notify_type": NotifyType.FAILURE,
"title": None,
"body_format": NotifyFormat.TEXT,
"tag": "all",
"attach": None,
"interpret_escapes": None,
"config": None,
}
mock_apprise_hook.return_value.notify.assert_called_once()
@mock.patch("airflow.providers.apprise.notifications.apprise.AppriseHook")
def test_notifier_with_notifier_class(self, mock_apprise_hook, create_dag_without_db):
notifier = AppriseNotifier(body="DISK at 99%", notify_type=NotifyType.FAILURE)
notifier({"dag": create_dag_without_db("test_notifier")})
call_args = mock_apprise_hook.return_value.notify.call_args.kwargs
assert call_args == {
"body": "DISK at 99%",
"notify_type": NotifyType.FAILURE,
"title": None,
"body_format": NotifyFormat.TEXT,
"tag": "all",
"attach": None,
"interpret_escapes": None,
"config": None,
}
mock_apprise_hook.return_value.notify.assert_called_once()
@mock.patch("airflow.providers.apprise.notifications.apprise.AppriseHook")
def test_notifier_templated(self, mock_apprise_hook, create_dag_without_db):
notifier = AppriseNotifier(
notify_type=NotifyType.FAILURE,
title="DISK at 99% {{dag.dag_id}}",
body="System can crash soon {{dag.dag_id}}",
)
context = {"dag": create_dag_without_db("test_notifier")}
notifier(context)
call_args = mock_apprise_hook.return_value.notify.call_args.kwargs
assert call_args == {
"body": "System can crash soon test_notifier",
"title": "DISK at 99% test_notifier",
"notify_type": NotifyType.FAILURE,
"body_format": NotifyFormat.TEXT,
"tag": "all",
"attach": None,
"interpret_escapes": None,
"config": None,
}
mock_apprise_hook.return_value.notify.assert_called_once()
@pytest.mark.asyncio
@mock.patch("airflow.providers.apprise.notifications.apprise.AppriseHook")
async def test_async_apprise_notifier(self, mock_apprise_hook, create_dag_without_db):
mock_apprise_hook.return_value.async_notify = mock.AsyncMock()
notifier = send_apprise_notification(body="DISK at 99%", notify_type=NotifyType.FAILURE)
await notifier.async_notify({"dag": create_dag_without_db("test_notifier")})
call_args = mock_apprise_hook.return_value.async_notify.call_args.kwargs
assert call_args == {
"body": "DISK at 99%",
"notify_type": NotifyType.FAILURE,
"title": None,
"body_format": NotifyFormat.TEXT,
"tag": "all",
"attach": None,
"interpret_escapes": None,
"config": None,
}
mock_apprise_hook.return_value.async_notify.assert_called_once()
| TestAppriseNotifier |
python | tensorflow__tensorflow | tensorflow/python/distribute/multi_worker_test_base.py | {
"start": 20149,
"end": 20290
} | class ____(object):
def __enter__(self):
return
def __exit__(self, exception_type, exception_value, traceback):
pass
| DummySession |
python | agronholm__apscheduler | src/apscheduler/executors/async_.py | {
"start": 191,
"end": 670
} | class ____(JobExecutor):
"""
Executes functions directly on the event loop thread.
If the function returns a coroutine object (or another kind of awaitable), that is
awaited on and its return value is used as the job's return value.
"""
async def run_job(self, func: Callable[..., Any], job: Job) -> Any:
retval = func(*job.args, **job.kwargs)
if isawaitable(retval):
retval = await retval
return retval
| AsyncJobExecutor |
python | google__pytype | pytype/typegraph/typegraph_serializer.py | {
"start": 1209,
"end": 1305
} | class ____:
id: VariableId
bindings: list[BindingId]
@dataclasses.dataclass
| SerializedVariable |
python | ray-project__ray | release/ray_release/result.py | {
"start": 512,
"end": 2721
} | class ____:
results: Optional[Dict] = None
status: str = ResultStatus.UNKNOWN.value
return_code: int = 0
last_logs: Optional[str] = None
runtime: Optional[float] = None
stable: bool = True
smoke_test: bool = False
buildkite_url: Optional[str] = None
cluster_url: Optional[str] = None
# Anyscale Jobs specific
job_url: Optional[str] = None
job_id: Optional[str] = None
buildkite_job_id: Optional[str] = None
cluster_id: Optional[str] = None
prometheus_metrics: Optional[Dict] = None
extra_tags: Optional[Dict] = None
def _is_transient_error(runtime: int) -> bool:
"""
Classify whether an infra-failure issue is a transient issue. This is based on
the status of its previous retries, and its runtime.
"""
retry_count = int(os.environ.get("BUILDKITE_RETRY_COUNT", 0))
max_retry = int(os.environ.get("BUILDKITE_MAX_RETRIES", 1))
if retry_count >= max_retry:
# Already reach retry limit
return False
if runtime > int(os.environ.get("BUILDKITE_TIME_LIMIT_FOR_RETRY", 0)):
# Take too long to run
return False
return True
def handle_exception(
e: Exception, run_duration: int
) -> Tuple[ExitCode, ResultStatus, Optional[int]]:
if not isinstance(e, ReleaseTestError):
return ExitCode.UNKNOWN, ResultStatus.UNKNOWN, 0
exit_code = e.exit_code
if 1 <= exit_code.value < 10:
result_status = ResultStatus.RUNTIME_ERROR
runtime = None
elif 10 <= exit_code.value < 20:
result_status = ResultStatus.INFRA_ERROR
runtime = None
elif 30 <= exit_code.value < 40:
result_status = ResultStatus.INFRA_TIMEOUT
runtime = None
elif exit_code == ExitCode.COMMAND_TIMEOUT:
result_status = ResultStatus.TIMEOUT
runtime = 0
elif 40 <= exit_code.value:
result_status = ResultStatus.ERROR
runtime = 0
# if this result is to be retried, mark its status as transient
# this logic should be in-sync with run_release_test.sh
if _is_transient_error(run_duration):
result_status = ResultStatus.TRANSIENT_INFRA_ERROR
return exit_code, result_status, runtime
| Result |
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 574,
"end": 719
} | class ____(TestModelMixin, TestBase):
def testIsRegistered(self):
self.assertTrue(reversion.is_registered(TestModel))
| IsRegisteredTest |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/plugins/databricks_workflow.py | {
"start": 17334,
"end": 18980
} | class ____(BaseOperatorLink, LoggingMixin):
"""Construct a link to send a repair request for a single databricks task."""
name = "Repair a single task"
def get_link(
self,
operator,
dttm=None,
*,
ti_key: TaskInstanceKey | None = None,
) -> str:
if not ti_key:
ti = get_task_instance(operator, dttm)
ti_key = ti.key
task_group = operator.task_group
if not task_group:
raise AirflowException("Task group is required for generating repair link.")
self.log.info(
"Creating link to repair a single task for databricks job run %s task %s",
task_group.group_id,
ti_key.task_id,
)
from airflow.utils.session import create_session
with create_session() as session:
dag = _get_dag(ti_key.dag_id, session=session)
task = dag.get_task(ti_key.task_id)
if TYPE_CHECKING:
assert isinstance(task, DatabricksTaskBaseOperator)
if ".launch" not in ti_key.task_id:
launch_task_id = get_launch_task_id(task_group)
ti_key = _get_launch_task_key(ti_key, task_id=launch_task_id)
metadata = get_xcom_result(ti_key, "return_value")
query_params = {
"dag_id": ti_key.dag_id,
"databricks_conn_id": metadata.conn_id,
"databricks_run_id": metadata.run_id,
"run_id": ti_key.run_id,
"tasks_to_repair": task.databricks_task_key,
}
return url_for("RepairDatabricksTasks.repair", **query_params)
| WorkflowJobRepairSingleTaskLink |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/collaborators/github_client.py | {
"start": 254,
"end": 685
} | class ____(Protocol):
def get_all_endpoints(self) -> Dict[str, str]: ...
async def request(
self,
endpoint: str,
method: str,
headers: Dict[str, Any] = {},
params: Dict[str, Any] = {},
**kwargs: Any,
) -> Any: ...
async def get_collaborators(
self,
owner: str,
repo: str,
page: int = 1,
) -> Dict: ...
| BaseGitHubCollaboratorsClient |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/common_transformers/anf.py | {
"start": 2803,
"end": 23477
} | class ____(transformer.Base):
"""Performs the conversion to A-normal form (ANF)."""
# The algorithm is a postorder recursive tree walk. Any given node A may, in
# general, require creation of a series B of Assign statements, which compute
# and explicitly name the intermediate values needed to compute the value of
# A. If A was already a statement, it can be replaced with the sequence B +
# [A]. If A was an expression, B needs to be propagated up the tree until a
# statement is encountered. Since the `ast.NodeTransformer` framework makes
# no provision for subtraversals returning side information, this class
# accumulates the sequence B in an instance variable.
# The only other subtlety is that some Python statements (like `if`) have both
# expression fields (`test`) and statement list fields (`body` and `orelse`).
# Any additional assignments needed to name all the intermediate values in the
# `test` can be prepended to the `if` node, but assignments produced by
# processing the `body` and the `orelse` need to be kept together with them,
# and not accidentally lifted out of the `if`.
def __init__(self, ctx, config):
"""Creates an ANF transformer.
Args:
ctx: transformer.Context
config: Configuration
"""
super(AnfTransformer, self).__init__(ctx)
if config is None:
# These could be pulled out, but are generally considered to already be in
# A-normal form. Thus they are left in by default, but could be pulled
# out if the configuration calls for it.
if gast_util.GAST2:
literal_node_types = (
gast.Num, gast.Str, gast.Bytes, gast.NameConstant,
gast.Name # Name is here to cover True, False, and None in Python 2
)
elif gast_util.GAST3:
literal_node_types = (
gast.Constant,
gast.Name # Name is here to cover True, False, and None in Python 2
)
else:
assert False
self._overrides = [
(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE),
(ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]
else:
self._overrides = config
self._gensym = DummyGensym()
self._pending_statements = []
def _consume_pending_statements(self):
ans = self._pending_statements
self._pending_statements = []
return ans
def _add_pending_statement(self, stmt):
self._pending_statements.append(stmt)
def _match(self, pattern, parent, field, child):
if pattern is ANY:
return True
else:
return pattern.matches(parent, field, child)
def _should_transform(self, parent, field, child):
for pat, result in self._overrides:
if self._match(pat, parent, field, child):
return result(parent, field, child)
# Fell off the end of the pattern list: do not transform
return False
def _do_transform_node(self, node):
temp_name = self._gensym.new_name()
temp_assign = templates.replace(
'temp_name = expr', temp_name=temp_name, expr=node)[0]
self._add_pending_statement(temp_assign)
answer = templates.replace('temp_name', temp_name=temp_name)[0]
return answer
def _ensure_node_in_anf(self, parent, field, node):
"""Puts `node` in A-normal form, by replacing it with a variable if needed.
The exact definition of A-normal form is given by the configuration. The
parent and the incoming field name are only needed because the configuration
may be context-dependent.
Args:
parent: An AST node, the parent of `node`.
field: The field name under which `node` is the child of `parent`.
node: An AST node, potentially to be replaced with a variable reference.
Returns:
node: An AST node; the argument if transformation was not necessary,
or the new variable reference if it was.
"""
if node is None:
return node
if _is_trivial(node):
return node
if isinstance(node, list):
# If something's field was actually a list, e.g., variadic arguments.
return [self._ensure_node_in_anf(parent, field, n) for n in node]
if isinstance(node, gast.keyword):
node.value = self._ensure_node_in_anf(parent, field, node.value)
return node
if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
# These nodes aren't really extractable in their own right, but their
# subnodes might be. Propagate the parent and field name to the child
# nodes, instead of querying the configuration for children of, e.g.,
# gast.Starred.
return self._ensure_fields_in_anf(node, parent, field)
if self._should_transform(parent, field, node):
return self._do_transform_node(node)
else:
return node
def _ensure_fields_in_anf(self, node, parent=None, super_field=None):
for field in node._fields:
if field.startswith('__'):
continue
parent_supplied = node if parent is None else parent
field_supplied = field if super_field is None else super_field
setattr(node, field, self._ensure_node_in_anf(
parent_supplied, field_supplied, getattr(node, field)))
return node
def _visit_strict_statement(self, node, children_ok_to_transform=True):
assert not self._pending_statements
node = self.generic_visit(node)
if children_ok_to_transform:
self._ensure_fields_in_anf(node)
results = self._consume_pending_statements()
results.append(node)
return results
def _visit_trivial_only_statement(self, node, msg):
assert not self._pending_statements
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
if self._pending_statements:
raise ValueError(msg)
else:
return node
def _visit_strict_expression(self, node):
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
return node
def _visit_trivial_only_expression(self, node, msg):
k = len(self._pending_statements)
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
# This check relies on there being no opportunities to consume pending
# statements while traversing children of an expression.
if len(self._pending_statements) != k:
raise ValueError(msg)
else:
return node
# Note on code order: These are listed in the same order as the grammar
# elements on https://github.com/serge-sans-paille/gast
# FunctionDef, AsyncFunctionDef, and ClassDef should be correct by default.
def visit_Return(self, node):
return self._visit_strict_statement(node)
def visit_Delete(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_Assign(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_AugAssign(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_Print(self, node):
return self._visit_strict_statement(node)
def visit_For(self, node):
assert not self._pending_statements
# It's important to visit node.iter first, because any statements created
# thereby need to live outside the body.
self.visit(node.iter)
node.iter = self._ensure_node_in_anf(node, 'iter', node.iter)
iter_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.iter, but that is correct because by
# this point the node.iter link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.iter, as
# then it may be large and will be uselessly transformed again. This
# behavior is what causes the documented effect that configuration callables
# may be invoked more than once of the same links; if the code is rewritten
# not to do that (anywhere), the docstring of `transform` should be updated.
node = self.generic_visit(node)
assert not self._pending_statements
iter_stmts.append(node)
return iter_stmts
def visit_AsyncFor(self, node):
msg = ('Nontrivial AsyncFor nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_statement(node, msg)
def visit_While(self, node):
assert not self._pending_statements
self.visit(node.test)
node.test = self._ensure_node_in_anf(node, 'test', node.test)
if self._pending_statements:
msg = ('While with nontrivial test not supported yet '
'(need to avoid precomputing the test).')
raise ValueError(msg)
# If traversing node.test yielded no statements extracted, the generic visit
# will do the right thing.
return self.generic_visit(node)
def visit_If(self, node):
assert not self._pending_statements
# It's important to visit node.test first, because any statements created
# thereby need to live outside the body.
self.visit(node.test)
node.test = self._ensure_node_in_anf(node, 'test', node.test)
condition_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.test, but that is correct because by
# this point the node.test link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.test, as
# then it may be large and will be uselessly transformed again. This
# happens in several places.
node = self.generic_visit(node)
assert not self._pending_statements
condition_stmts.append(node)
return condition_stmts
def visit_With(self, node):
assert not self._pending_statements
# It's important to visit node.items first, because any statements created
# thereby need to live outside the body.
for item in node.items:
self.visit(item)
node.items = [self._ensure_node_in_anf(node, 'items', n)
for n in node.items]
contexts_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.items, but that is correct because by
# this point the node.items link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.items, as
# then it may be large and will be uselessly transformed again. This
# happens in several places.
node = self.generic_visit(node)
assert not self._pending_statements
contexts_stmts.append(node)
return contexts_stmts
def visit_AsyncWith(self, node):
msg = ('Nontrivial AsyncWith nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_statement(node, msg)
def visit_Raise(self, node):
return self._visit_strict_statement(node)
# Try should be correct by default.
def visit_Assert(self, node):
msg = ('Nontrivial Assert nodes not supported yet '
'(need to avoid computing the test when assertions are off, and '
'avoid computing the irritant when the assertion does not fire).')
return self._visit_trivial_only_statement(node, msg)
# Import and ImportFrom should be correct by default.
def visit_Exec(self, node):
return self._visit_strict_statement(node)
# Global and Nonlocal should be correct by default.
def visit_Expr(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
# Pass, Break, and Continue should be correct by default.
def visit_BoolOp(self, node):
msg = ('Nontrivial BoolOp nodes not supported yet '
'(need to preserve short-circuiting semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_BinOp(self, node):
return self._visit_strict_expression(node)
def visit_UnaryOp(self, node):
return self._visit_strict_expression(node)
def visit_Lambda(self, node):
msg = ('Nontrivial Lambda nodes not supported '
'(cannot insert statements into lambda bodies).')
return self._visit_trivial_only_expression(node, msg)
def visit_IfExp(self, node):
msg = ('Nontrivial IfExp nodes not supported yet '
'(need to convert to If statement, to evaluate branches lazily '
'and insert statements into them).')
return self._visit_trivial_only_expression(node, msg)
def visit_Dict(self, node):
return self._visit_strict_expression(node)
def visit_Set(self, node):
return self._visit_strict_expression(node)
def visit_ListComp(self, node):
msg = ('ListComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_SetComp(self, node):
msg = ('SetComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_DictComp(self, node):
msg = ('DictComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_GeneratorExp(self, node):
msg = ('GeneratorExp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_Await(self, node):
msg = ('Nontrivial Await nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_Yield(self, node):
return self._visit_strict_expression(node)
def visit_YieldFrom(self, node):
msg = ('Nontrivial YieldFrom nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Compare(self, node):
if len(node.ops) > 1:
msg = ('Multi-ary compare nodes not supported yet '
'(need to preserve short-circuiting semantics).')
raise ValueError(msg)
return self._visit_strict_expression(node)
def visit_Call(self, node):
return self._visit_strict_expression(node)
def visit_Repr(self, node):
msg = ('Nontrivial Repr nodes not supported yet '
'(need to research their syntax and semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_FormattedValue(self, node):
msg = ('Nontrivial FormattedValue nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_JoinedStr(self, node):
msg = ('Nontrivial JoinedStr nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Attribute(self, node):
return self._visit_strict_expression(node)
def visit_Subscript(self, node):
return self._visit_strict_expression(node)
# Starred and Name are correct by default, because the right thing to do is to
# just recur.
def visit_List(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_in_anf(node)
return node
def visit_Tuple(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_in_anf(node)
return node
def _is_py2_name_constant(node):
return isinstance(node, gast.Name) and node.id in ['True', 'False', 'None']
def _is_trivial(node):
"""Returns whether to consider the given node 'trivial'.
The definition of 'trivial' is a node that can't meaningfully be pulled out
into its own assignment statement.
This is surprisingly difficult to do robustly across versions of Python and
gast, as the parsing of constants has changed, if I may, constantly.
Args:
node: An AST node to check for triviality
Returns:
trivial: A Python `bool` indicating whether the node is trivial.
"""
trivial_node_types = (
# Variable names
gast.Name,
# Non-nodes that show up as AST fields
bool,
str,
# Binary operators
gast.Add,
gast.Sub,
gast.Mult,
gast.Div,
gast.Mod,
gast.Pow,
gast.LShift,
gast.RShift,
gast.BitOr,
gast.BitXor,
gast.BitAnd,
gast.FloorDiv,
# Unary operators
gast.Invert,
gast.Not,
gast.UAdd,
gast.USub,
# Comparison operators
gast.Eq,
gast.NotEq,
gast.Lt,
gast.LtE,
gast.Gt,
gast.GtE,
gast.Is,
gast.IsNot,
gast.In,
gast.NotIn,
# Other leaf nodes that don't make sense standalone.
gast.expr_context,
)
if isinstance(node, trivial_node_types) and not _is_py2_name_constant(node):
return True
if gast_util.is_ellipsis(node):
return True
return False
def transform(node, ctx, config=None):
"""Converts the given node to A-normal form (ANF).
The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
What exactly should be considered A-normal form for any given programming
language is not completely obvious. The transformation defined here is
therefore configurable as to which syntax to replace with a fresh variable and
which to leave be. The configuration is intentionally flexible enough to
define very precise variable insertion transformations, should that be
desired.
The configuration is a list of syntax rules, each of which is a 2-tuple:
- An `ASTEdgePattern` (which see) defining a type of AST edge, and
- Whether to transform children of such edges.
The special object `anf.ANY` may be used as a pattern that matches all edges.
Each replacement directive is one of three possible things:
- The object `anf.REPLACE`, meaning "Replace this child node with a variable",
- The object `anf.LEAVE`, meaning "Do not replace this child node with a
variable", or
- A Python callable. If a callable, it is called with the parent node, the
field name, and the child node, and must compute a boolean indicating
whether to transform the child node or not. The callable is free to use
whatever context information it chooses. The callable may be invoked more
than once on the same link, and must produce the same answer each time.
The syntax rules are tested in order, and the first match governs. If no rule
matches, the node is not transformed.
The above rules notwithstanding,
- Variable references are never replaced with (fresh) variables, as that would
accomplish nothing.
- The left-hand children of Assign and AugAssign nodes, and the children of
Del nodes, are never replaced with variables, as that would break their
semantics.
- The right-hand children of Assign nodes are never replaced with variables,
as the original assignment would still have to be present in the result
to define the new variable. (That is, there's no point in transforming
`x = sin(y)` into `tmp = sin(y); x = tmp`.)
- The right-hand children of AugAssign nodes are never replaced with variables
either, but only because the difference from Assign was considered a
potential source of confusion (and it would have been slightly awkward in
the code to treat the RHS differently than the LHS).
- Various special-purpose AST nodes are not exposed to the configuration, lest
the transform produce invalid syntax like, e.g., `tmp = +; x = 1 tmp 2`.
For example, the configuration
```python
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
gives explicit fresh names to all expressions regardless of context (except as
outlined above), whereas
```python
[(anf.ASTEdgePattern(gast.If, "test", anf.ANY), anf.REPLACE)]
```
only transforms the conditionals of `if` statements (but not, e.g., `while`).
If no configuration is supplied, the default behavior is to transform all
expressions except literal constants, which is defined as a configuration as
```python
# For Python 3, and gast library versions before 0.3
literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant)
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, literals), anf.LEAVE),
(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
Args:
node: The node to transform.
ctx: transformer.EntityInfo. TODO(mdan): What information does this
argument provide?
config: Optional ANF configuration. If omitted, ANF replaces all expression
expect literal constants.
"""
return AnfTransformer(ctx, config).visit(node)
| AnfTransformer |
python | bokeh__bokeh | tests/unit/bokeh/util/test_hex.py | {
"start": 2751,
"end": 4136
} | class ____:
def test_gaussian_pointytop(self) -> None:
bins = buh.hexbin(x, y, 2)
np.testing.assert_array_equal(bins.q, [0, 0, 1, 1, 1, 2, 2])
np.testing.assert_array_equal(bins.r, [0, -1, 0, -2, -1, -2, -1])
np.testing.assert_array_equal(bins.counts, [54, 9, 98, 1, 313, 3, 22])
pointy_bins = buh.hexbin(x, y, 2, "pointytop")
np.testing.assert_array_equal(bins.q, pointy_bins.q)
np.testing.assert_array_equal(bins.r, pointy_bins.r)
np.testing.assert_array_equal(bins.counts, pointy_bins.counts)
def test_gaussian_flattop(self) -> None:
bins = buh.hexbin(x, y, 2, "flattop")
np.testing.assert_array_equal(bins.q, [0, 0, 1, 1, 1, 2])
np.testing.assert_array_equal(bins.r, [0, -1, 0, -2, -1, -2])
np.testing.assert_array_equal(bins.counts, [57, 95, 8, 14, 324, 2])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_hexbin |
python | pyinstaller__pyinstaller | bootloader/waflib/Node.py | {
"start": 1853,
"end": 15259
} | class ____(object):
dict_class = dict
__slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir')
def __init__(self, name, parent):
self.name = name
self.parent = parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent))
parent.children[name] = self
def __setstate__(self, data):
self.name = data[0]
self.parent = data[1]
if data[2] is not None:
self.children = self.dict_class(data[2])
def __getstate__(self):
return (self.name, self.parent, getattr(self, 'children', None))
def __str__(self):
return self.abspath()
def __repr__(self):
return self.abspath()
def __copy__(self):
raise Errors.WafError('nodes are not supposed to be copied')
def read(self, flags='r', encoding='latin-1'):
return Utils.readf(self.abspath(), flags, encoding)
def write(self, data, flags='w', encoding='latin-1'):
Utils.writef(self.abspath(), data, flags, encoding)
def read_json(self, convert=True, encoding='utf-8'):
import json
object_pairs_hook = None
if convert and sys.hexversion < 0x3000000:
try:
_type = unicode
except NameError:
_type = str
def convert(value):
if isinstance(value, list):
return [convert(element) for element in value]
elif isinstance(value, _type):
return str(value)
else:
return value
def object_pairs(pairs):
return dict((str(pair[0]), convert(pair[1])) for pair in pairs)
object_pairs_hook = object_pairs
return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook)
def write_json(self, data, pretty=True):
import json
indent = 2
separators = (',', ': ')
sort_keys = pretty
newline = os.linesep
if not pretty:
indent = None
separators = (',', ':')
newline = ''
output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline
self.write(output, encoding='utf-8')
def exists(self):
return os.path.exists(self.abspath())
def isdir(self):
return os.path.isdir(self.abspath())
def chmod(self, val):
os.chmod(self.abspath(), val)
def delete(self, evict=True):
try:
try:
if os.path.isdir(self.abspath()):
shutil.rmtree(self.abspath())
else:
os.remove(self.abspath())
except OSError:
if os.path.exists(self.abspath()):
raise
finally:
if evict:
self.evict()
def evict(self):
del self.parent.children[self.name]
def suffix(self):
k = max(0, self.name.rfind('.'))
return self.name[k:]
def height(self):
d = self
val = -1
while d:
d = d.parent
val += 1
return val
def listdir(self):
lst = Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
if self.isdir():
return
try:
self.parent.mkdir()
except OSError:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not self.isdir():
raise Errors.WafError('Could not create the directory %r' % self)
try:
self.children
except AttributeError:
self.children = self.dict_class()
def find_node(self, lst):
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
if lst and lst[0].startswith('\\\\') and not self.parent:
node = self.ctx.root.make_node(lst[0])
node.cache_isdir = True
return node.find_node(lst[1:])
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
try:
ch = cur.children
except AttributeError:
cur.children = self.dict_class()
else:
try:
cur = ch[x]
continue
except KeyError:
pass
cur = self.__class__(x, cur)
if not cur.exists():
cur.evict()
return None
if not cur.exists():
cur.evict()
return None
return cur
def make_node(self, lst):
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
try:
cur = cur.children[x]
except AttributeError:
cur.children = self.dict_class()
except KeyError:
pass
else:
continue
cur = self.__class__(x, cur)
return cur
def search_node(self, lst):
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
else:
try:
cur = cur.children[x]
except (AttributeError, KeyError):
return None
return cur
def path_from(self, node):
c1 = self
c2 = node
c1h = c1.height()
c2h = c2.height()
lst = []
up = 0
while c1h > c2h:
lst.append(c1.name)
c1 = c1.parent
c1h -= 1
while c2h > c1h:
up += 1
c2 = c2.parent
c2h -= 1
while not c1 is c2:
lst.append(c1.name)
up += 1
c1 = c1.parent
c2 = c2.parent
if c1.parent:
lst.extend(['..'] * up)
lst.reverse()
return os.sep.join(lst) or '.'
else:
return self.abspath()
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if not self.parent:
val = os.sep
elif not self.parent.name:
val = os.sep + self.name
else:
val = self.parent.abspath() + os.sep + self.name
self.cache_abspath = val
return val
if Utils.is_win32:
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if not self.parent:
val = ''
elif not self.parent.name:
val = self.name + os.sep
else:
val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name
self.cache_abspath = val
return val
def is_child_of(self, node):
p = self
diff = self.height() - node.height()
while diff > 0:
diff -= 1
p = p.parent
return p is node
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False):
dircont = self.listdir()
try:
lst = set(self.children.keys())
except AttributeError:
self.children = self.dict_class()
else:
if remove:
for x in lst - set(dircont):
self.children[x].evict()
for name in dircont:
npats = accept(name, pats)
if npats and npats[0]:
accepted = [] in npats[0]
node = self.make_node([name])
isdir = node.isdir()
if accepted:
if isdir:
if dir:
yield node
elif src:
yield node
if isdir:
node.cache_isdir = True
if maxdepth:
for k in node.ant_iter(
accept=accept,
maxdepth=maxdepth - 1,
pats=npats,
dir=dir,
src=src,
remove=remove,
quiet=quiet
):
yield k
def ant_glob(self, *k, **kw):
src = kw.get('src', True)
dir = kw.get('dir')
excl = kw.get('excl', exclude_regs)
incl = k and k[0] or kw.get('incl', '**')
remove = kw.get('remove', True)
maxdepth = kw.get('maxdepth', 25)
ignorecase = kw.get('ignorecase', False)
quiet = kw.get('quiet', False)
pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase))
if kw.get('generator'):
return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet))
it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)
if kw.get('flat'):
return ' '.join(x.path_from(self) for x in it)
return list(it)
def is_src(self):
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
while cur.parent:
if cur is y:
return False
if cur is x:
return True
cur = cur.parent
return False
def is_bld(self):
cur = self
y = self.ctx.bldnode
while cur.parent:
if cur is y:
return True
cur = cur.parent
return False
def get_src(self):
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
lst = []
while cur.parent:
if cur is y:
lst.reverse()
return x.make_node(lst)
if cur is x:
return self
lst.append(cur.name)
cur = cur.parent
return self
def get_bld(self):
cur = self
x = self.ctx.srcnode
y = self.ctx.bldnode
lst = []
while cur.parent:
if cur is y:
return self
if cur is x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur = cur.parent
lst.reverse()
if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'):
lst[0] = lst[0][0]
return self.ctx.bldnode.make_node(['__root__'] + lst)
def find_resource(self, lst):
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
node = self.get_bld().search_node(lst)
if not node:
node = self.get_src().find_node(lst)
if node and node.isdir():
return None
return node
def find_or_declare(self, lst):
if isinstance(lst, str) and os.path.isabs(lst):
node = self.ctx.root.make_node(lst)
else:
node = self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self, lst):
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
node = self.find_node(lst)
if node and not node.isdir():
return None
return node
def change_ext(self, ext, ext_in=None):
name = self.name
if ext_in is None:
k = name.rfind('.')
if k >= 0:
name = name[:k] + ext
else:
name = name + ext
else:
name = name[:-len(ext_in)] + ext
return self.parent.find_or_declare([name])
def bldpath(self):
return self.path_from(self.ctx.bldnode)
def srcpath(self):
return self.path_from(self.ctx.srcnode)
def relpath(self):
cur = self
x = self.ctx.bldnode
while cur.parent:
if cur is x:
return self.bldpath()
cur = cur.parent
return self.srcpath()
def bld_dir(self):
return self.parent.bldpath()
def h_file(self):
return Utils.h_file(self.abspath())
def get_bld_sig(self):
try:
cache = self.ctx.cache_sig
except AttributeError:
cache = self.ctx.cache_sig = {}
try:
ret = cache[self]
except KeyError:
p = self.abspath()
try:
ret = cache[self] = self.h_file()
except EnvironmentError:
if self.isdir():
st = os.stat(p)
ret = cache[self] = Utils.h_list([p, st.st_ino, st.st_mode])
return ret
raise
return ret
pickle_lock = Utils.threading.Lock()
| Node |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 5657,
"end": 6811
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the SWITCH_TRANSFORMERS style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# SWITCH_TRANSFORMERS uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
| SwitchTransformersLayerNorm |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py | {
"start": 18478,
"end": 21524
} | class ____(CriteriaEvalChain):
"""Criteria evaluation chain that requires references."""
@classmethod
@override
def is_lc_serializable(cls) -> bool:
return False
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return True
@classmethod
def _resolve_prompt(
cls,
prompt: BasePromptTemplate | None = None,
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria", "reference"}
prompt_ = prompt or PROMPT_WITH_REFERENCES
if expected_input_vars != set(prompt_.input_variables):
msg = (
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
raise ValueError(msg)
return prompt_
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: CRITERIA_TYPE | None = None,
*,
prompt: BasePromptTemplate | None = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns:
-------
LabeledCriteriaEvalChain
An instance of the `LabeledCriteriaEvalChain` class.
Examples:
--------
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain
>>> model = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=model,
criteria=criteria,
)
"""
prompt = cls._resolve_prompt(prompt)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
| LabeledCriteriaEvalChain |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/plugins/test_listener.py | {
"start": 45894,
"end": 92056
} | class ____:
@pytest.mark.skip("Rendering fields is not migrated yet in Airflow 3")
@patch("airflow.models.BaseOperator.render_template")
def test_listener_does_not_change_task_instance(self, render_mock, mock_supervisor_comms, spy_agency):
from airflow.sdk.execution_time.task_runner import (
RuntimeTaskInstance,
TaskInstance as SdkTaskInstance,
run,
)
render_mock.return_value = render_df()
date = timezone.datetime(2022, 1, 1)
dag = DAG(
"test",
schedule=None,
start_date=date,
user_defined_macros={"render_df": render_df},
params={"df": {"col": [1, 2]}},
)
task = TemplateOperator(task_id="template_op", dag=dag, do_xcom_push=True, df=dag.param("df"))
run_id = str(uuid.uuid1())
dagrun_kwargs = {
"dag_version": None,
"logical_date": date,
"triggered_by": types.DagRunTriggeredByType.TEST,
}
dag.create_dagrun(
run_id=run_id,
data_interval=(date, date),
run_type=types.DagRunType.MANUAL,
state=DagRunState.QUEUED,
**dagrun_kwargs,
)
ti = SdkTaskInstance(
id=uuid7(),
task_id="template_op",
dag_id=dag.dag_id,
run_id=run_id,
try_number=1,
map_index=-1,
dag_version_id=uuid7(),
)
runtime_ti = RuntimeTaskInstance.model_construct(**ti.model_dump(exclude_unset=True), task=task)
spy_agency.spy_on(runtime_ti.xcom_push, call_original=False)
run(runtime_ti, None)
# check if task returns the same DataFrame
pd.testing.assert_frame_equal(runtime_ti.xcom_push.last_call.args[1], render_df())
# check if render_template method always get the same unrendered field
assert not isinstance(runtime_ti.xcom_push.last_call.args[1], pd.DataFrame)
def _setup_mock_listener(self, mock_listener: mock.Mock, captured_try_numbers: dict[str, int]) -> None:
"""Sets up the mock listener with side effects to capture try numbers for different task instance events.
:param mock_listener: The mock object for the listener manager.
:param captured_try_numbers: A dictionary to store captured try numbers keyed by event names.
This function iterates through specified event names and sets a side effect on the corresponding
method of the listener manager's hook. The side effect is a nested function that captures the try number
of the task instance when the method is called.
:Example:
captured_try_numbers = {}
mock_listener = Mock()
_setup_mock_listener(mock_listener, captured_try_numbers)
# After running a task, captured_try_numbers will have the try number captured at the moment of
execution for specified methods. F.e. {"running": 1, "success": 2} for on_task_instance_running and
on_task_instance_success methods.
"""
def capture_try_number(method_name):
def inner(*args, **kwargs):
captured_try_numbers[method_name] = kwargs["task_instance"].try_number
return inner
for event in ["running", "success", "failed"]:
getattr(
mock_listener.return_value.hook, f"on_task_instance_{event}"
).side_effect = capture_try_number(event)
def _create_test_dag_and_task(
self, python_callable: Callable, scenario_name: str
) -> tuple[DagRun, TaskInstance]:
"""Creates a test DAG and a task for a custom test scenario.
:param python_callable: The Python callable to be executed by the PythonOperator.
:param scenario_name: The name of the test scenario, used to uniquely name the DAG and task.
:return: TaskInstance: The created TaskInstance object.
This function creates a DAG and a PythonOperator task with the provided python_callable. It generates a unique
run ID and creates a DAG run. This setup is useful for testing different scenarios in Airflow tasks.
:Example:
def sample_callable(**kwargs):
print("Hello World")
task_instance = _create_test_dag_and_task(sample_callable, "sample_scenario")
# Use task_instance to simulate running a task in a test.
"""
date = timezone.datetime(2022, 1, 1)
dag = DAG(
f"test_{scenario_name}",
schedule=None,
start_date=date,
)
t = PythonOperator(task_id=f"test_task_{scenario_name}", dag=dag, python_callable=python_callable)
run_id = str(uuid.uuid1())
dagrun_kwargs = {
"dag_version": None,
"logical_date": date,
"triggered_by": types.DagRunTriggeredByType.TEST,
}
dagrun = create_scheduler_dag(dag).create_dagrun(
run_id=run_id,
data_interval=(date, date),
start_date=date,
run_type=types.DagRunType.MANUAL,
state=DagRunState.QUEUED,
**dagrun_kwargs,
)
task_instance = TaskInstance(t, run_id=run_id) # type: ignore
task_instance.dag_run = dagrun
return dagrun, task_instance
def _create_listener_and_task_instance(
self, runtime_ti: bool = True
) -> tuple[OpenLineageListener, RuntimeTaskInstance | TaskInstance]:
"""Creates and configures an OpenLineageListener instance and a mock task instance for testing.
:arg runtime_ti: Whether we should return mock RuntimeTaskInstance or mock TaskInstance
:return: A tuple containing the configured OpenLineageListener and task instance.
This function instantiates an OpenLineageListener, sets up its required properties with mock objects, and
creates a mock task instance with predefined attributes. This setup is commonly used for testing the
interaction between an OpenLineageListener and a task instance in Airflow.
:Example:
listener, task_instance = _create_listener_and_task_instance()
# Now you can use listener and task_instance in your tests to simulate their interaction.
"""
if not runtime_ti:
# TaskInstance is used when on API server (when listener gets called about manual state change)
task_instance = TaskInstance(task=MagicMock(), dag_version_id=uuid7())
task_instance.dag_run = DagRun()
task_instance.dag_run.dag_id = "dag_id_from_dagrun_and_not_ti"
task_instance.dag_run.run_id = "dag_run_run_id"
task_instance.dag_run.clear_number = 0
task_instance.dag_run.logical_date = timezone.datetime(2020, 1, 1, 1, 1, 1)
task_instance.dag_run.run_after = timezone.datetime(2020, 1, 1, 1, 1, 1)
task_instance.dag_run.state = DagRunState.RUNNING
task_instance.task = None
task_instance.dag = None
task_instance.task_id = "task_id"
task_instance.dag_id = "dag_id"
task_instance.try_number = 1
task_instance.map_index = -1
else:
# RuntimeTaskInstance is used when on worker
from airflow.sdk.api.datamodels._generated import (
DagRun as SdkDagRun,
DagRunState as SdkDagRunState,
DagRunType,
TaskInstance as SdkTaskInstance,
TIRunContext,
)
from airflow.sdk.definitions.dag import DAG
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance
dag = DAG(
dag_id="dag_id_from_dag_not_ti",
description="Test DAG Description",
tags=["tag1", "tag2"],
)
task = EmptyOperator(
task_id="task_id_from_task_not_ti", dag=dag, owner="task_owner", doc_md="TASK Description"
)
task2 = EmptyOperator(task_id="task_id2_from_task_not_ti", dag=dag, owner="another_owner") # noqa: F841
sdk_task_instance = SdkTaskInstance(
id=uuid7(),
task_id="task_id",
dag_id="dag_id",
run_id="dag_run_run_id",
try_number=1,
map_index=-1,
dag_version_id=uuid7(),
)
task_instance = RuntimeTaskInstance.model_construct( # type: ignore
**sdk_task_instance.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=TIRunContext(
dag_run=SdkDagRun.model_validate(
{
"dag_id": "dag_id_from_dagrun_not_ti",
"run_id": "dag_run_run_id_from_dagrun_not_ti",
"logical_date": timezone.datetime(2020, 1, 1, 1, 1, 1),
"start_date": timezone.datetime(2023, 1, 1, 13, 1, 1),
"end_date": timezone.datetime(2023, 1, 3, 13, 1, 1),
"run_type": DagRunType.MANUAL,
"run_after": timezone.datetime(2023, 1, 3, 13, 1, 1),
"consumed_asset_events": [],
**(
{"state": SdkDagRunState.RUNNING} if "state" in SdkDagRun.model_fields else {}
),
}
),
task_reschedule_count=0,
max_tries=1,
should_retry=False,
),
start_date=timezone.datetime(2023, 1, 1, 13, 1, 1),
)
def mock_dag_id(dag_id, logical_date, clear_number):
return f"{logical_date.isoformat()}.{dag_id}.{clear_number}"
def mock_task_id(dag_id, task_id, try_number, logical_date, map_index):
return f"{logical_date.isoformat()}.{dag_id}.{task_id}.{try_number}.{map_index}"
listener = OpenLineageListener()
listener.extractor_manager = mock.Mock()
metadata = mock.Mock()
metadata.run_facets = {"run_facet": 1}
listener.extractor_manager.extract_metadata.return_value = metadata
adapter = mock.Mock()
adapter.build_dag_run_id.side_effect = mock_dag_id
adapter.build_task_instance_run_id.side_effect = mock_task_id
adapter.start_task = mock.Mock()
adapter.fail_task = mock.Mock()
adapter.complete_task = mock.Mock()
listener.adapter = adapter
return listener, task_instance
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_start_task_is_called_with_proper_arguments(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
"""Tests that the 'start_task' method of the OpenLineageAdapter is invoked with the correct arguments.
The test checks that the job name, job description, event time, and other related data are
correctly passed to the adapter. It also verifies that custom facets and Airflow run facets are
correctly retrieved and included in the call. This ensures that all relevant data, including custom
and Airflow-specific metadata, is accurately conveyed to the adapter during the initialization of a task,
reflecting the comprehensive tracking of task execution contexts."""
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
listener.on_task_instance_running(None, task_instance)
listener.adapter.start_task.assert_called_once_with(
run_id="2020-01-01T01:01:01+00:00.dag_id.task_id.1.-1",
job_name="dag_id.task_id",
job_description="TASK Description",
job_description_type="text/markdown",
event_time="2023-01-01T13:01:01+00:00",
nominal_start_time=None,
nominal_end_time=None,
owners=["task_owner"],
tags={"tag1", "tag2"},
task=listener.extractor_manager.extract_metadata(),
run_facets={
"mapped_facet": 1,
"custom_user_facet": 2,
"airflow_run_facet": 3,
"parent": 4,
"debug": "packages",
},
)
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_start_task_is_called_with_dag_owners_when_task_owner_is_default(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.owner = "airflow" # Simulate default owner on task to force fallback to DAG owner
listener.on_task_instance_running(None, task_instance)
call_owners = listener.adapter.start_task.call_args.kwargs["owners"]
assert sorted(call_owners) == ["airflow", "another_owner"]
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_job_name")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_start_task_is_called_with_dag_description_when_task_doc_is_empty(
self,
mock_get_job_name,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_job_name.return_value = "job_name"
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.doc_md = None # Simulate lack of task doc to force fallback to DAG description
listener.on_task_instance_running(None, task_instance)
assert listener.adapter.start_task.call_args.kwargs["job_description"] == "Test DAG Description"
assert listener.adapter.start_task.call_args.kwargs["job_description_type"] == "text/plain"
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_fail_task_is_called_with_proper_arguments(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
time_machine,
):
"""Tests that the 'fail_task' method of the OpenLineageAdapter is invoked with the correct arguments.
This test ensures that the job name is accurately retrieved and included, along with the generated
run_id and task metadata. By mocking the job name retrieval and the run_id generation,
the test verifies the integrity and consistency of the data passed to the adapter during task
failure events, thus confirming that the adapter's failure handling is functioning as expected.
"""
time_machine.move_to(timezone.datetime(2023, 1, 3, 13, 1, 1), tick=False)
listener, task_instance = self._create_listener_and_task_instance()
task_instance.get_template_context()["dag_run"].logical_date = timezone.datetime(2020, 1, 1, 1, 1, 1)
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow": {"task": "..."}}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
err = ValueError("test")
listener.on_task_instance_failed(previous_state=None, task_instance=task_instance, error=err)
listener.adapter.fail_task.assert_called_once_with(
end_time="2023-01-03T13:01:01+00:00",
job_name="dag_id.task_id",
run_id="2020-01-01T01:01:01+00:00.dag_id.task_id.1.-1",
task=listener.extractor_manager.extract_metadata(),
owners=["task_owner"],
tags={"tag1", "tag2"},
job_description="TASK Description",
job_description_type="text/markdown",
nominal_start_time=None,
nominal_end_time=None,
run_facets={
"parent": 4,
"custom_user_facet": 2,
"airflow": {"task": "..."},
"debug": "packages",
},
error=err,
)
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_fail_task_is_called_with_dag_owners_when_task_owner_is_default(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.owner = "airflow" # Simulate default owner on task to force fallback to DAG owner
err = ValueError("test")
listener.on_task_instance_failed(previous_state=None, task_instance=task_instance, error=err)
call_owners = listener.adapter.fail_task.call_args.kwargs["owners"]
assert sorted(call_owners) == ["airflow", "another_owner"]
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_fail_task_is_called_with_dag_description_when_task_doc_is_empty(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.doc_md = None # Simulate lack of task doc to force fallback to DAG description
err = ValueError("test")
listener.on_task_instance_failed(previous_state=None, task_instance=task_instance, error=err)
assert listener.adapter.fail_task.call_args.kwargs["job_description"] == "Test DAG Description"
assert listener.adapter.fail_task.call_args.kwargs["job_description_type"] == "text/plain"
@mock.patch("airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter.emit")
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_fail_task_is_called_with_proper_arguments_for_db_task_instance_model(
self,
mock_get_task_parent_run_facet,
mock_debug_facet,
mock_debug_mode,
mock_emit,
time_machine,
):
"""Tests that the 'fail_task' method of the OpenLineageAdapter is invoked with the correct arguments.
This particular test is using TaskInstance model available on API Server and not on worker,
to simulate the listener being called after task's state has been manually set via API.
"""
time_machine.move_to(timezone.datetime(2023, 1, 3, 13, 1, 1), tick=False)
listener, task_instance = self._create_listener_and_task_instance(runtime_ti=False)
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
err = ValueError("test")
listener.on_task_instance_failed(previous_state=None, task_instance=task_instance, error=err)
mock_get_task_parent_run_facet.assert_called_once_with(
parent_run_id="2020-01-01T01:01:01+00:00.dag_id.0",
parent_job_name=task_instance.dag_id,
dr_conf={},
)
expected_args = dict(
end_time="2023-01-03T13:01:01+00:00",
job_name="dag_id.task_id",
run_id="2020-01-01T01:01:01+00:00.dag_id.task_id.1.-1",
task=OperatorLineage(),
nominal_start_time=None,
nominal_end_time=None,
tags=None,
owners=None,
job_description=None,
job_description_type=None,
run_facets={
"parent": 4,
"debug": "packages",
},
error=err,
)
listener.adapter.fail_task.assert_called_once_with(**expected_args)
expected_args["run_id"] = "9d3b14f7-de91-40b6-aeef-e887e2c7673e"
adapter = OpenLineageAdapter()
adapter.fail_task(**expected_args)
assert mock_emit.assert_called_once
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_complete_task_is_called_with_proper_arguments(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
time_machine,
):
"""Tests that the 'complete_task' method of the OpenLineageAdapter is called with the correct arguments.
It checks that the job name is correctly retrieved and passed,
along with the run_id and task metadata. The test also simulates changes in the try_number
attribute of the task instance, as it would occur in Airflow, to ensure that the run_id is updated
accordingly. This helps confirm the consistency and correctness of the data passed to the adapter
during the task's lifecycle events.
"""
time_machine.move_to(timezone.datetime(2023, 1, 3, 13, 1, 1), tick=False)
listener, task_instance = self._create_listener_and_task_instance()
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow": {"task": "..."}}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
listener.on_task_instance_success(None, task_instance)
calls = listener.adapter.complete_task.call_args_list
assert len(calls) == 1
assert calls[0][1] == dict(
end_time="2023-01-03T13:01:01+00:00",
job_name="dag_id.task_id",
run_id="2020-01-01T01:01:01+00:00.dag_id.task_id.1.-1",
task=listener.extractor_manager.extract_metadata(),
owners=["task_owner"],
tags={"tag1", "tag2"},
job_description="TASK Description",
job_description_type="text/markdown",
nominal_start_time=None,
nominal_end_time=None,
run_facets={
"parent": 4,
"custom_user_facet": 2,
"airflow": {"task": "..."},
"debug": "packages",
},
)
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_complete_task_is_called_with_dag_owners_when_task_owner_is_default(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.owner = "airflow" # Simulate default owner on task to force fallback to DAG owner
listener.on_task_instance_success(None, task_instance)
call_owners = listener.adapter.complete_task.call_args.kwargs["owners"]
assert sorted(call_owners) == ["airflow", "another_owner"]
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_mapped_task_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_complete_task_is_called_with_dag_description_when_task_doc_is_empty(
self,
mock_get_user_provided_run_facets,
mock_get_airflow_mapped_task_facet,
mock_get_airflow_run_facet,
mock_get_task_parent_run_facet,
mock_disabled,
mock_debug_facet,
mock_debug_mode,
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_airflow_mapped_task_facet.return_value = {"mapped_facet": 1}
mock_get_user_provided_run_facets.return_value = {"custom_user_facet": 2, "parent": 99}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
mock_disabled.return_value = False
task_instance.task.doc_md = None # Simulate lack of task doc to force fallback to DAG description
listener.on_task_instance_success(None, task_instance)
assert listener.adapter.complete_task.call_args.kwargs["job_description"] == "Test DAG Description"
assert listener.adapter.complete_task.call_args.kwargs["job_description_type"] == "text/plain"
@mock.patch("airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter.emit")
@mock.patch("airflow.providers.openlineage.conf.debug_mode", return_value=True)
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_debug_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_task_parent_run_facet")
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_adapter_complete_task_is_called_with_proper_arguments_for_db_task_instance_model(
self, mock_get_task_parent_run_facet, mock_debug_facet, mock_debug_mode, mock_emit, time_machine
):
"""Tests that the 'complete_task' method of the OpenLineageAdapter is called with the correct arguments.
This particular test is using TaskInstance model available on API Server and not on worker,
to simulate the listener being called after task's state has been manually set via API.
"""
time_machine.move_to(timezone.datetime(2023, 1, 3, 13, 1, 1), tick=False)
listener, task_instance = self._create_listener_and_task_instance(runtime_ti=False)
mock_get_task_parent_run_facet.return_value = {"parent": 4}
mock_debug_facet.return_value = {"debug": "packages"}
listener.on_task_instance_success(None, task_instance)
calls = listener.adapter.complete_task.call_args_list
assert len(calls) == 1
mock_get_task_parent_run_facet.assert_called_once_with(
parent_run_id="2020-01-01T01:01:01+00:00.dag_id.0",
parent_job_name=task_instance.dag_id,
dr_conf={},
)
expected_args = dict(
end_time="2023-01-03T13:01:01+00:00",
job_name="dag_id.task_id",
run_id="2020-01-01T01:01:01+00:00.dag_id.task_id.1.-1",
task=OperatorLineage(),
nominal_start_time=None,
nominal_end_time=None,
tags=None,
owners=None,
job_description=None,
job_description_type=None,
run_facets={
"parent": 4,
"debug": "packages",
},
)
assert calls[0][1] == expected_args
expected_args["run_id"] = "9d3b14f7-de91-40b6-aeef-e887e2c7673e"
adapter = OpenLineageAdapter()
adapter.complete_task(**expected_args)
assert mock_emit.assert_called_once
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_on_task_instance_running_correctly_calls_openlineage_adapter_run_id_method(self):
"""Tests the OpenLineageListener's response when a task instance is in the running state.
This test ensures that when an Airflow task instance transitions to the running state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = self._create_listener_and_task_instance()
listener.on_task_instance_running(None, task_instance)
listener.adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
logical_date=timezone.datetime(2020, 1, 1, 1, 1, 1),
try_number=1,
map_index=-1,
)
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_on_task_instance_failed_correctly_calls_openlineage_adapter_run_id_method(self):
"""Tests the OpenLineageListener's response when a task instance is in the failed state.
This test ensures that when an Airflow task instance transitions to the failed state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = self._create_listener_and_task_instance()
on_task_failed_kwargs = {"error": ValueError("test")}
listener.on_task_instance_failed(
previous_state=None, task_instance=task_instance, **on_task_failed_kwargs
)
listener.adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
logical_date=timezone.datetime(2020, 1, 1, 1, 1, 1),
try_number=1,
map_index=-1,
)
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_on_task_instance_success_correctly_calls_openlineage_adapter_run_id_method(self):
"""Tests the OpenLineageListener's response when a task instance is in the success state.
This test ensures that when an Airflow task instance transitions to the success state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = self._create_listener_and_task_instance()
listener.on_task_instance_success(None, task_instance)
listener.adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
logical_date=timezone.datetime(2020, 1, 1, 1, 1, 1),
try_number=1,
map_index=-1,
)
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_airflow_run_facet")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
def test_listener_on_task_instance_running_do_not_call_adapter_when_disabled_operator(
self, mock_get_user_provided_run_facets, mock_get_airflow_run_facet, mock_disabled
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_user_provided_run_facets.return_value = {"custom_facet": 2}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_disabled.return_value = True
listener.on_task_instance_running(None, task_instance)
mock_disabled.assert_called_once_with(task_instance.task)
listener.adapter.build_dag_run_id.assert_not_called()
listener.adapter.build_task_instance_run_id.assert_not_called()
listener.extractor_manager.extract_metadata.assert_not_called()
listener.adapter.start_task.assert_not_called()
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
def test_listener_on_task_instance_failed_do_not_call_adapter_when_disabled_operator(
self, mock_get_user_provided_run_facets, mock_disabled
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_user_provided_run_facets.return_value = {"custom_facet": 2}
mock_disabled.return_value = True
on_task_failed_kwargs = {"error": ValueError("test")}
listener.on_task_instance_failed(
previous_state=None, task_instance=task_instance, **on_task_failed_kwargs
)
mock_disabled.assert_called_once_with(task_instance.task)
listener.adapter.build_dag_run_id.assert_not_called()
listener.adapter.build_task_instance_run_id.assert_not_called()
listener.extractor_manager.extract_metadata.assert_not_called()
listener.adapter.fail_task.assert_not_called()
@mock.patch("airflow.providers.openlineage.plugins.listener.is_operator_disabled")
@mock.patch("airflow.providers.openlineage.plugins.listener.get_user_provided_run_facets")
def test_listener_on_task_instance_success_do_not_call_adapter_when_disabled_operator(
self, mock_get_user_provided_run_facets, mock_disabled
):
listener, task_instance = self._create_listener_and_task_instance()
mock_get_user_provided_run_facets.return_value = {"custom_facet": 2}
mock_disabled.return_value = True
listener.on_task_instance_success(None, task_instance)
mock_disabled.assert_called_once_with(task_instance.task)
listener.adapter.build_dag_run_id.assert_not_called()
listener.adapter.build_task_instance_run_id.assert_not_called()
listener.extractor_manager.extract_metadata.assert_not_called()
listener.adapter.complete_task.assert_not_called()
@pytest.mark.parametrize(
("max_workers", "expected"),
[
(None, 1),
("8", 8),
],
)
@mock.patch("airflow.providers.openlineage.plugins.listener.ProcessPoolExecutor", autospec=True)
def test_listener_on_dag_run_state_changes_configure_process_pool_size(
self, mock_executor, max_workers, expected
):
"""mock ProcessPoolExecutor and check if conf.dag_state_change_process_pool_size is applied to max_workers"""
listener = OpenLineageListener()
# mock ProcessPoolExecutor class
with conf_vars({("openlineage", "dag_state_change_process_pool_size"): max_workers}):
listener.on_dag_run_running(mock.MagicMock(), None)
mock_executor.assert_called_once_with(max_workers=expected, initializer=mock.ANY)
mock_executor.return_value.submit.assert_called_once()
@pytest.mark.db_test
@pytest.mark.parametrize(
("method", "dag_run_state"),
[
("on_dag_run_running", DagRunState.RUNNING),
("on_dag_run_success", DagRunState.SUCCESS),
("on_dag_run_failed", DagRunState.FAILED),
],
)
@patch("airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter.emit")
def test_listener_on_dag_run_state_changes(self, mock_emit, method, dag_run_state, create_task_instance):
mock_executor = MockExecutor()
ti = create_task_instance(dag_id="dag", task_id="op")
ti.start_date = datetime(2020, 1, 1, tzinfo=timezone.utc)
ti.end_date = datetime(2020, 1, 1, 1, tzinfo=timezone.utc)
# Change the state explicitly to set end_date following the logic in the method
ti.dag_run.set_state(dag_run_state)
with mock.patch(
"airflow.providers.openlineage.plugins.listener.ProcessPoolExecutor", return_value=mock_executor
):
listener = OpenLineageListener()
getattr(listener, method)(ti.dag_run, None)
assert mock_executor.submitted is True
assert mock_executor.succeeded is True
mock_emit.assert_called_once()
def test_listener_logs_failed_serialization(self):
listener = OpenLineageListener()
callback_future = Future()
def set_result(*args, **kwargs):
callback_future.set_result(True)
listener.log = MagicMock()
listener.log.warning = MagicMock(side_effect=set_result)
listener.adapter = OpenLineageAdapter(
client=OpenLineageClient(transport=ConsoleTransport(config=ConsoleConfig()))
)
event_time = timezone.utcnow()
fut = listener.submit_callable(
listener.adapter.dag_failed,
dag_id="",
run_id="",
end_date=event_time,
logical_date=callback_future,
clear_number=0,
dag_run_state=DagRunState.FAILED,
task_ids=["task_id"],
msg="",
)
assert fut.exception(10)
callback_future.result(10)
assert callback_future.done()
listener.log.debug.assert_not_called()
listener.log.warning.assert_called_once()
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 tests")
| TestOpenLineageListenerAirflow3 |
python | google__jax | jax/_src/source_info_util.py | {
"start": 2462,
"end": 2685
} | class ____(NamedTuple):
name: str
def wrap(self, stack: list[str]):
if stack:
stack[-1] = f'{self.name}({stack[-1]})'
else:
stack.append(f'{self.name}()')
@dataclasses.dataclass(frozen=True)
| Transform |
python | kamyu104__LeetCode-Solutions | Python/clone-n-ary-tree.py | {
"start": 957,
"end": 1340
} | class ____(object):
def cloneTree(self, root):
"""
:type root: Node
:rtype: Node
"""
def dfs(node):
if not node:
return None
copy = Node(node.val)
for child in node.children:
copy.children.append(dfs(child))
return copy
return dfs(root)
| Solution2 |
python | keras-team__keras | keras/src/wrappers/utils.py | {
"start": 835,
"end": 2394
} | class ____(TransformerMixin, BaseEstimator):
"""Convert 1D targets to 2D and back.
For use in pipelines with transformers that only accept
2D inputs, like OneHotEncoder and OrdinalEncoder.
Attributes:
ndim_ : int
Dimensions of y that the transformer was trained on.
"""
def fit(self, y):
"""Fit the transformer to a target y.
Returns:
TargetReshaper
A reference to the current instance of TargetReshaper.
"""
self.ndim_ = y.ndim
return self
def transform(self, y):
"""Makes 1D y 2D.
Args:
y : np.ndarray
Target y to be transformed.
Returns:
np.ndarray
A numpy array, of dimension at least 2.
"""
if y.ndim == 1:
return y.reshape(-1, 1)
return y
def inverse_transform(self, y):
"""Revert the transformation of transform.
Args:
y: np.ndarray
Transformed numpy array.
Returns:
np.ndarray
If the transformer was fit to a 1D numpy array,
and a 2D numpy array with a singleton second dimension
is passed, it will be squeezed back to 1D. Otherwise, it
will eb left untouched.
"""
from sklearn.utils.validation import check_is_fitted
check_is_fitted(self)
if self.ndim_ == 1 and y.ndim == 2:
return np.squeeze(y, axis=1)
return y
| TargetReshaper |
python | kamyu104__LeetCode-Solutions | Python/painting-the-walls.py | {
"start": 64,
"end": 473
} | class ____(object):
def paintWalls(self, cost, time):
"""
:type cost: List[int]
:type time: List[int]
:rtype: int
"""
dp = [float("inf")]*(len(cost)+1)
dp[0] = 0
for c, t in itertools.izip(cost, time):
for j in reversed(xrange(1, len(cost)+1)):
dp[j] = min(dp[j], dp[max(j-(t+1), 0)]+c)
return dp[-1]
| Solution |
python | pypa__virtualenv | src/virtualenv/config/cli/parser.py | {
"start": 1246,
"end": 4036
} | class ____(ArgumentParser):
"""Custom option parser which updates its defaults by checking the configuration files and environmental vars."""
def __init__(self, options=None, env=None, *args, **kwargs) -> None:
env = os.environ if env is None else env
self.file_config = IniConfig(env)
self.epilog_list = []
self.env = env
kwargs["epilog"] = self.file_config.epilog
kwargs["add_help"] = False
kwargs["formatter_class"] = HelpFormatter
kwargs["prog"] = "virtualenv"
super().__init__(*args, **kwargs)
self._fixed = set()
if options is not None and not isinstance(options, VirtualEnvOptions):
msg = "options must be of type VirtualEnvOptions"
raise TypeError(msg)
self.options = VirtualEnvOptions() if options is None else options
self._interpreter = None
self._app_data = None
def _fix_defaults(self):
for action in self._actions:
action_id = id(action)
if action_id not in self._fixed:
self._fix_default(action)
self._fixed.add(action_id)
def _fix_default(self, action):
if hasattr(action, "default") and hasattr(action, "dest") and action.default != SUPPRESS:
as_type = get_type(action)
names = OrderedDict((i.lstrip("-").replace("-", "_"), None) for i in action.option_strings)
outcome = None
for name in names:
outcome = get_env_var(name, as_type, self.env)
if outcome is not None:
break
if outcome is None and self.file_config:
for name in names:
outcome = self.file_config.get(name, as_type)
if outcome is not None:
break
if outcome is not None:
action.default, action.default_source = outcome
else:
outcome = action.default, "default"
self.options.set_src(action.dest, *outcome)
def enable_help(self):
self._fix_defaults()
self.add_argument("-h", "--help", action="help", default=SUPPRESS, help="show this help message and exit")
def parse_known_args(self, args=None, namespace=None):
if namespace is None:
namespace = self.options
elif namespace is not self.options:
msg = "can only pass in parser.options"
raise ValueError(msg)
self._fix_defaults()
self.options._src = "cli" # noqa: SLF001
try:
namespace.env = self.env
return super().parse_known_args(args, namespace=namespace)
finally:
self.options._src = None # noqa: SLF001
| VirtualEnvConfigParser |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 25969,
"end": 26078
} | class ____(oracle.INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
| _OracleInterval |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 16845,
"end": 18321
} | class ____(Module):
r"""Applies the CELU function element-wise.
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
Args:
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/CELU.png
Examples::
>>> m = nn.CELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Continuously Differentiable Exponential Linear Units`:
https://arxiv.org/abs/1704.07483
"""
__constants__ = ["alpha", "inplace"]
alpha: float
inplace: bool
def __init__(self, alpha: float = 1.0, inplace: bool = False) -> None:
super().__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.celu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
inplace_str = ", inplace=True" if self.inplace else ""
return f"alpha={self.alpha}{inplace_str}"
| CELU |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | {
"start": 8832,
"end": 10123
} | class ____(BaseSphinx):
sphinx_builder = "epub"
relative_output_dir = "epub"
def _post_build(self):
"""Internal post build to cleanup EPUB output directory and leave only one .epub file."""
temp_epub_file = f"/tmp/{self.project.slug}-{self.version.slug}.epub"
target_file = os.path.join(
self.absolute_container_output_dir,
f"{self.project.slug}.epub",
)
epub_sphinx_filepaths = glob(os.path.join(self.absolute_host_output_dir, "*.epub"))
if epub_sphinx_filepaths:
# NOTE: we currently support only one .epub per version
epub_filepath = epub_sphinx_filepaths[0]
self.run("mv", epub_filepath, temp_epub_file, cwd=self.project_path, record=False)
self.run(
"rm",
"--recursive",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"--parents",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run("mv", temp_epub_file, target_file, cwd=self.project_path, record=False)
| EpubBuilder |
python | google__pytype | pytype/rewrite/flow/frame_base.py | {
"start": 714,
"end": 808
} | class ____:
"""Block and opcode indices for a frame step."""
block: int
opcode: int
| _Step |
python | ray-project__ray | python/ray/serve/_private/benchmarks/streaming/streaming_handle_throughput.py | {
"start": 240,
"end": 2363
} | class ____(Caller):
async def _consume_single_stream(self):
method = self._get_remote_method().options(
stream=True,
)
async for r in method.remote():
# Blackhole the response
# self.sink(str(r, 'utf-8'))
self.sink(r)
@click.command(help="Benchmark streaming deployment handle throughput.")
@click.option(
"--tokens-per-request",
type=int,
default=1000,
help="Number of requests to send to downstream deployment in each trial.",
)
@click.option(
"--batch-size",
type=int,
default=10,
help="Number of requests to send to downstream deployment in each trial.",
)
@click.option(
"--num-replicas",
type=int,
default=1,
help="Number of replicas in the downstream deployment.",
)
@click.option(
"--num-trials",
type=int,
default=5,
help="Number of trials of the benchmark to run.",
)
@click.option(
"--trial-runtime",
type=int,
default=1,
help="Duration to run each trial of the benchmark for (seconds).",
)
@click.option(
"--io-mode",
type=str,
default="async",
help="Controls mode of the streaming generation (either 'sync' or 'async')",
)
def main(
tokens_per_request: int,
batch_size: int,
num_replicas: int,
num_trials: int,
trial_runtime: float,
io_mode: str,
):
app = CallerDeployment.bind(
EndpointDeployment.options(num_replicas=num_replicas).bind(tokens_per_request),
mode=IOMode(io_mode.upper()),
tokens_per_request=tokens_per_request,
batch_size=batch_size,
num_trials=num_trials,
trial_runtime=trial_runtime,
)
h = serve.run(app)
mean, stddev = h.run_benchmark.remote().result()
print(
"DeploymentHandle streaming throughput ({}) {}: {} +- {} tokens/s".format(
io_mode.upper(),
f"(num_replicas={num_replicas}, "
f"tokens_per_request={tokens_per_request}, "
f"batch_size={batch_size})",
mean,
stddev,
)
)
if __name__ == "__main__":
main()
| CallerDeployment |
python | getsentry__sentry | tests/apidocs/endpoints/releases/test_organization_release_details.py | {
"start": 172,
"end": 1686
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org2 = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
self.project1 = self.create_project(teams=[team1], organization=org)
self.project2 = self.create_project(teams=[team2], organization=org2)
self.project3 = self.create_project(teams=[team1], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release = self.create_release(
project=self.project1,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
self.url = reverse(
"sentry-api-0-organization-release-details",
kwargs={"organization_id_or_slug": org.slug, "version": release.version},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_put(self) -> None:
data = {"projects": [self.project3.slug]}
response = self.client.put(self.url, data)
request = RequestFactory().put(self.url, data)
self.validate_schema(request, response)
| OrganizationReleaseDetailsDocsTest |
python | PrefectHQ__prefect | src/prefect/context.py | {
"start": 12677,
"end": 13633
} | class ____(ContextModel):
"""
The base context for a flow or task run. Data in this context will always be
available when `get_run_context` is called.
Attributes:
start_time: The time the run context was entered
client: The Prefect client instance being used for API communication
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
start_client_metrics_server()
start_time: DateTime = Field(
default_factory=lambda: prefect.types._datetime.now("UTC")
)
input_keyset: Optional[dict[str, dict[str, str]]] = None
client: Union[PrefectClient, SyncPrefectClient]
def serialize(self: Self, include_secrets: bool = True) -> dict[str, Any]:
return self.model_dump(
include={"start_time", "input_keyset"},
exclude_unset=True,
context={"include_secrets": include_secrets},
)
| RunContext |
python | huggingface__transformers | tests/models/speech_to_text/test_feature_extraction_speech_to_text.py | {
"start": 1407,
"end": 3374
} | class ____:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=24,
num_mel_bins=24,
padding_value=0.0,
sampling_rate=16_000,
return_attention_mask=True,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.num_mel_bins = num_mel_bins
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
| Speech2TextFeatureExtractionTester |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 134084,
"end": 139494
} | class ____(ValueChannelMixin, core.StringValueDefWithCondition):
"""
DescriptionValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "description"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> DescriptionValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefstringnullExprRef], /
) -> DescriptionValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| DescriptionValue |
python | getsentry__sentry | src/sentry/api/serializers/models/release.py | {
"start": 4602,
"end": 7849
} | class ____(TypedDict):
authors: list[Author]
def _get_authors_metadata(
item_list: list[Release], user: User | RpcUser | AnonymousUser
) -> dict[Release, _AuthorList]:
"""
Returns a dictionary of release_id => authors metadata,
where each commit metadata dict contains an array of
authors.
e.g.
{
1: {
'authors': [<User id=1>, <User id=2>]
},
...
}
"""
author_ids = set()
for obj in item_list:
if obj.authors is not None:
author_ids.update(obj.authors)
if author_ids:
authors = list(CommitAuthor.objects.filter(id__in=author_ids))
else:
authors = []
if authors:
org_ids = {item.organization_id for item in item_list}
if len(org_ids) != 1:
users_by_author: Mapping[str, Author] = {}
else:
users_by_author = get_users_for_authors(
organization_id=org_ids.pop(), authors=authors, user=user
)
else:
users_by_author = {}
result: dict[Release, _AuthorList] = {}
for item in item_list:
item_authors = []
seen_authors = set()
if item.authors is not None:
for user_resp in (users_by_author.get(a) for a in item.authors):
if user_resp and user_resp["email"] not in seen_authors:
seen_authors.add(user_resp["email"])
item_authors.append(user_resp)
result[item] = {"authors": item_authors}
return result
def _get_last_commit_metadata(item_list, user):
"""
Returns a dictionary of release_id => commit metadata,
where each commit metadata dict contains last_commit.
e.g.
{
1: {
'last_commit': <Commit id=1>,
},
...
}
"""
commit_ids = {o.last_commit_id for o in item_list if o.last_commit_id}
if commit_ids:
commit_list = list(Commit.objects.filter(id__in=commit_ids).select_related("author"))
commits = {c.id: d for c, d in zip(commit_list, serialize(commit_list, user))}
else:
commits = {}
result = {}
for item in item_list:
result[item] = {
"last_commit": commits.get(item.last_commit_id),
}
return result
def _get_last_deploy_metadata(item_list, user):
"""
Returns a dictionary of release_id => deploy metadata,
where each commit metadata dict contains last_deploy
e.g.
{
1: {
'latest_commit': <Commit id=1>,
'authors': [<User id=1>, <User id=2>]
},
...
}
"""
deploy_ids = {o.last_deploy_id for o in item_list if o.last_deploy_id}
if deploy_ids:
deploy_list = list(Deploy.objects.filter(id__in=deploy_ids))
deploys = {d.id: c for d, c in zip(deploy_list, serialize(deploy_list, user))}
else:
deploys = {}
result = {}
for item in item_list:
result[item] = {"last_deploy": deploys.get(item.last_deploy_id)}
return result
def _user_to_author_cache_key(organization_id: int, author: CommitAuthor) -> str:
author_hash = md5_text(author.email.lower()).hexdigest()
return f"get_users_for_authors:{organization_id}:{author_hash}"
| _AuthorList |
python | tensorflow__tensorflow | tensorflow/python/saved_model/saved_model_test.py | {
"start": 58558,
"end": 67643
} | class ____(SavedModelTestBase):
def _validate_asset_collection(self,
export_dir,
graph_collection_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
asset = meta_graph_pb2.AssetFileDef()
assets_any[asset_id].Unpack(asset)
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name, asset.filename)
self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
def testWritingAssetsToCollection(self):
export_dir = self._get_export_dir("test_writing_assets_to_collection")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with ops.Graph().as_default():
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset list.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()),
compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_collection = self._build_asset_collection("hello42.txt",
"foo bar baz",
"asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
def testLegacyInitOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_legacy_init_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir,
constants.LEGACY_INIT_OP_KEY)
def testMainOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir("test_main_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)
def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
builder = saved_model_builder.SavedModelBuilder(export_dir)
with ops.Graph().as_default():
with self.session() as sess:
# Initialize variable `v1` to 1.
v1 = variable_v1.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
# Initialize another variable `v2` to 42.
v2 = variable_v1.VariableV1(
42, name="v2", trainable=False, collections=[])
ops.add_to_collection("v", v2)
# Set up an assignment op to be run as part of the init op.
assign_v2 = state_ops.assign(v2, v1)
init_op = control_flow_ops.group(assign_v2, name="init_op")
self.evaluate(variables.global_variables_initializer())
ops.add_to_collection(key, control_flow_ops.no_op())
# ValueError should be raised since the LEGACY_INIT_OP_KEY collection
# is not empty and we don't support multiple init ops.
with self.assertRaisesRegex(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=init_op)
# We shouldn't be able to add as MAIN_OP, either.
with self.assertRaisesRegex(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
def testStripDefaultAttrs(self):
export_dir = self._get_export_dir("test_strip_default_attrs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with two float32 variables and a Complex Op composing them
# with strip_default_attrs enabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variable_v1.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variable_v1.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], strip_default_attrs=True)
# Add a graph with the same float32 variables and a Complex Op composing
# them with strip_default_attrs disabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variable_v1.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variable_v1.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph(["bar"], strip_default_attrs=False)
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Loading graph "foo" via the loader must restore the defaults for the
# "Complex" node based on the "Complex" OpDef in the Op registry.
sess = session.Session(graph=ops.Graph())
meta_graph_def = loader.load(sess, ["foo"], export_dir)
complex_node = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", complex_node.attr)
self.assertIn("Tout", complex_node.attr)
# Load graph "foo" from disk as-is to verify default attrs are stripped.
saved_model_pb = loader_impl.parse_saved_model(export_dir)
self.assertIsNotNone(saved_model_pb)
meta_graph_foo_def = None
meta_graph_bar_def = None
for meta_graph_def in saved_model_pb.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
meta_graph_foo_def = meta_graph_def
elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
meta_graph_bar_def = meta_graph_def
self.assertIsNotNone(meta_graph_foo_def)
self.assertIsNotNone(meta_graph_bar_def)
# "Complex" Op has 2 attributes with defaults:
# o "T" : float32. (input type)
# o "Tout" : complex64. (output type)
# "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
# Graph "foo" was saved with strip_default_attrs set to True.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_foo_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# "Complex" Op in graph "bar" must have attributes "T" and "Tout".
# Graph "bar" was saved with strip_default_attrs set to False.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_bar_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
def testLegacyInitOp(self):
export_dir = self._get_export_dir("test_legacy_init_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with ops.Graph().as_default():
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variable_v1.VariableV1(1, name="v1")
v2 = variable_v1.VariableV1(2, name="v2")
# Initialize another variable `v3` to 42.
v3 = variable_v1.VariableV1(42, name="v3", trainable=False)
# Set up an assignment op to be run as part of the init_op.
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
legacy_init_op = control_flow_ops.group(
assign_v3, name="legacy_init_op")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=legacy_init_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, self._eval("v1"))
self.assertEqual(2, self._eval("v2"))
# Evaluates to the sum of the first two variables and assigned as part
# of the legacy_init_op, following a restore.
self.assertEqual(3, self._eval("v3"))
if __name__ == "__main__":
test.main()
| SavedModelV1Test |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_min_to_be_between.py | {
"start": 2769,
"end": 16921
} | class ____(ColumnAggregateExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnMinToBeBetween is a \
Column Aggregate Expectation.
Column Aggregate Expectations are one of the most common types of Expectation.
They are evaluated for a single column, and produce an aggregate Metric, such as a mean, standard deviation, number of unique values, column type, etc.
If that Metric meets the conditions you set, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
min_value (comparable type or None): \
{MIN_VALUE_DESCRIPTION}
max_value (comparable type or None): \
{MAX_VALUE_DESCRIPTION}
strict_min (boolean): \
{STRICT_MIN_DESCRIPTION}
strict_max (boolean): \
{STRICT_MAX_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
* observed_value field in the result object is customized for this expectation to be a list \
representing the actual column min
See Also:
[ExpectColumnMaxToBeBetween](https://greatexpectations.io/expectations/expect_column_max_to_be_between)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 1
1 1.3 7
2 .8 2.5
3 2 3
Code Examples:
Passing Case:
Input:
ExpectColumnMinToBeBetween(
column="test",
min_value=.5,
max_value=1
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": .8
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnMedianToBeBetween(
column="test2",
min_value=1,
max_value=3,
strict_min=True,
strict_max=True
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 1
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
min_value: Optional[Comparable] = pydantic.Field(
default=None, description=MIN_VALUE_DESCRIPTION
)
max_value: Optional[Comparable] = pydantic.Field(
default=None, description=MAX_VALUE_DESCRIPTION
)
strict_min: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MIN_DESCRIPTION
)
strict_max: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MAX_DESCRIPTION
)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP
metric_dependencies = ("column.min",)
success_keys = (
"min_value",
"strict_min",
"max_value",
"strict_max",
)
args_keys = (
"column",
"min_value",
"max_value",
"strict_min",
"strict_max",
)
class Config:
title = "Expect column minimum to be between"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ExpectColumnMinToBeBetween]) -> None:
ColumnAggregateExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
@override
def _prescriptive_template( # noqa: C901 # too complex
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.min_value and not params.max_value:
template_str = "minimum value may have any numerical value."
else:
at_least_str = "greater than or equal to"
if params.strict_min:
at_least_str = cls._get_strict_min_string(
renderer_configuration=renderer_configuration
)
at_most_str = "less than or equal to"
if params.strict_max:
at_most_str = cls._get_strict_max_string(
renderer_configuration=renderer_configuration
)
if params.min_value and params.max_value:
if params.min_value == params.max_value:
template_str = "minimum value must be $min_value"
else:
template_str = f"minimum value must be {at_least_str} $min_value and {at_most_str} $max_value." # noqa: E501 # FIXME CoP
elif not params.min_value:
template_str = f"minimum value must be {at_most_str} $max_value."
else:
template_str = f"minimum value must be {at_least_str} $min_value."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@override
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer( # type: ignore[override] # TODO: Fix this type ignore
cls,
configuration: ExpectationConfiguration,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "minimum value may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = (
f"minimum value must be {at_least_str} $min_value and {at_most_str} $max_value."
)
elif params["min_value"] is None:
template_str = f"minimum value must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"minimum value must be {at_least_str} $min_value."
else:
template_str = ""
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
@classmethod
@renderer(renderer_type=LegacyDescriptiveRendererType.STATS_TABLE_MIN_ROW)
def _descriptive_stats_table_min_row_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must pass in result."
return [
{
"content_block_type": "string_template",
"string_template": {
"template": "Minimum",
"tooltip": {"content": "expect_column_min_to_be_between"},
},
},
f"{result.result['observed_value']:.2f}",
]
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
return self._validate_metric_value_between(
metric_name="column.min",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| ExpectColumnMinToBeBetween |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_drop_lines03.py | {
"start": 315,
"end": 1547
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_drop_lines03.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with drop down lines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "area"})
chart.axis_ids = [61151872, 63947136]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 1, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.set_drop_lines()
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 51496,
"end": 55764
} | class ____(BaseField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass
that will be automatically dereferenced on access (lazily).
Note this field works the same way as :class:`~mongoengine.document.ReferenceField`,
doing database I/O access the first time it is accessed (even if it's to access
it ``pk`` or ``id`` field).
To solve this you should consider using the
:class:`~mongoengine.fields.GenericLazyReferenceField`.
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
"""
def __init__(self, *args, **kwargs):
choices = kwargs.pop("choices", None)
super().__init__(*args, **kwargs)
self.choices = []
# Keep the choices as a list of allowed Document class names
if choices:
for choice in choices:
if isinstance(choice, str):
self.choices.append(choice)
elif isinstance(choice, type) and issubclass(choice, Document):
self.choices.append(choice._class_name)
else:
# XXX ValidationError raised outside of the "validate"
# method.
self.error(
"Invalid choices provided: must be a list of"
"Document subclasses and/or str"
)
def _validate_choices(self, value):
if isinstance(value, dict):
# If the field has not been dereferenced, it is still a dict
# of class and DBRef
value = value.get("_cls")
elif isinstance(value, Document):
value = value._class_name
super()._validate_choices(value)
@staticmethod
def _lazy_load_ref(ref_cls, dbref):
dereferenced_son = ref_cls._get_db().dereference(dbref, session=_get_session())
if dereferenced_son is None:
raise DoesNotExist(f"Trying to dereference unknown document {dbref}")
return ref_cls._from_son(dereferenced_son)
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
auto_dereference = instance._fields[self.name]._auto_dereference
if auto_dereference and isinstance(value, dict):
doc_cls = _DocumentRegistry.get(value["_cls"])
instance._data[self.name] = self._lazy_load_ref(doc_cls, value["_ref"])
return super().__get__(instance, owner)
def validate(self, value):
if not isinstance(value, (Document, DBRef, dict, SON)):
self.error("GenericReferences can only contain documents")
if isinstance(value, (dict, SON)):
if "_ref" not in value or "_cls" not in value:
self.error("GenericReferences can only contain documents")
# We need the id from the saved object to create the DBRef
elif isinstance(value, Document) and value.id is None:
self.error(_unsaved_object_error(value.__class__.__name__))
def to_mongo(self, document):
if document is None:
return None
if isinstance(document, (dict, SON, ObjectId, DBRef)):
return document
id_field_name = document.__class__._meta["id_field"]
id_field = document.__class__._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.id
if id_ is None:
# XXX ValidationError raised outside of the "validate" method.
self.error(_unsaved_object_error(document.__class__.__name__))
else:
id_ = document
id_ = id_field.to_mongo(id_)
collection = document._get_collection_name()
ref = DBRef(collection, id_)
return SON((("_cls", document._class_name), ("_ref", ref)))
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
| GenericReferenceField |
python | huggingface__transformers | src/transformers/models/gemma3n/processing_gemma3n.py | {
"start": 954,
"end": 1084
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {"padding": False},
}
| Gemma3nProcessorKwargs |
python | kamyu104__LeetCode-Solutions | Python/count-complete-tree-nodes.py | {
"start": 798,
"end": 1673
} | class ____(object):
def countNodes(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def check(node, n):
base = 1
while base <= n:
base <<= 1
base >>= 2
while base:
if (n & base) == 0:
node = node.left
else:
node = node.right
base >>= 1
return bool(node)
if not root:
return 0
node, level = root, 0
while node.left:
node = node.left
level += 1
left, right = 2**level, 2**(level+1)-1
while left <= right:
mid = left+(right-left)//2
if not check(root, mid):
right = mid-1
else:
left = mid+1
return right
| Solution2 |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 2626,
"end": 3099
} | class ____(TagType):
__slots__ = ("group_id", "key", "values_seen", "count", "top_values")
_sort_key = "values_seen"
def __init__(
self,
group_id: int,
key: str,
values_seen: int | None = None,
count: int | None = None,
top_values=None,
):
self.group_id = group_id
self.key = key
self.values_seen = values_seen
self.count = count
self.top_values = top_values
| GroupTagKey |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.