language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 12311,
"end": 13036
} | class ____(GeoFunc):
arity = 2
geom_param_pos = ()
def __init__(self, expression, srid=0, **extra):
expressions = [
expression,
self._handle_param(srid, "srid", int),
]
if "output_field" not in extra:
extra["output_field"] = GeometryField(srid=srid)
super().__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
# Oracle doesn't support the srid parameter.
source_expressions = self.get_source_expressions()
clone = self.copy()
clone.set_source_expressions(source_expressions[:1])
return super(FromWKB, clone).as_sql(compiler, connection, **extra_context)
| FromWKB |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_resolved_in_pull_request.py | {
"start": 540,
"end": 5761
} | class ____(
SlackActivityNotificationTest, PerformanceIssueTestCase
):
def setUp(self) -> None:
super().setUp()
self.pull_request_url = "https://github.com/example/pull/123"
def create_notification(self, group):
return ResolvedInPullRequestActivityNotification(
Activity(
project=self.project,
group=group,
user_id=self.user.id,
type=ActivityType.SET_RESOLVED_IN_PULL_REQUEST,
data={"pull_request": {"externalUrl": self.pull_request_url}},
)
)
def test_resolved_in_pull_request_block(self) -> None:
notification = self.create_notification(self.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
fallback_text
== f"{self.name} made a <{self.pull_request_url}| pull request> that will resolve <http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{self.short_id}>"
)
assert blocks[0]["text"]["text"] == fallback_text
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=resolved_in_pull_request_activity-slack¬ification_uuid={notification_uuid}|*{self.group.title}*>"
)
assert (
blocks[3]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_pull_request_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_in_pull_request_performance_issue_block_with_culprit_blocks(
self, occurrence: mock.MagicMock
) -> None:
"""
Test that a Slack message is sent with the expected payload when a performance issue is resolved in a pull request
and block kit is enabled.
"""
event = self.create_performance_issue()
assert event.group is not None
notification = self.create_notification(event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
fallback_text
== f"{self.name} made a <{self.pull_request_url}| pull request> that will resolve <http://testserver/organizations/{event.organization.slug}/issues/{event.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{event.group.qualified_short_id}>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_performance_issue_blocks_with_culprit_blocks(
blocks,
event.organization,
event.project.slug,
event.group,
"resolved_in_pull_request_activity-slack",
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_in_pull_request_generic_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a generic issue type is resolved in a pull request
and block kit is enabled.
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
notification = self.create_notification(group_event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
fallback_text
== f"{self.name} made a <{self.pull_request_url}| pull request> that will resolve <http://testserver/organizations/{group_event.organization.slug}/issues/{group_event.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{group_event.group.qualified_short_id}>"
)
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
group_event.project.slug,
group_event.group,
"resolved_in_pull_request_activity-slack",
)
| SlackResolvedInPullRequestNotificationTest |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_wmt.py | {
"start": 1126,
"end": 3089
} | class ____:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int
output_vocab_size: int
share_embeddings: bool = False
logits_via_embedding: bool = False
dtype: Any = jnp.float32
emb_dim: int = 512
num_heads: int = 8
num_layers: int = 6
qkv_dim: int = 512
mlp_dim: int = 2048
max_len: int = 2048
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
deterministic: bool = False
decode: bool = False
kernel_init: Callable = nn.initializers.xavier_uniform()
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
posemb_init: Callable | None = None
def shift_right(x, axis=1):
"""Shift the input to the right by padding on axis 1."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
return padded[:, :-1]
def sinusoidal_init(max_len=2048,
min_scale=1.0,
max_scale=10000.0):
"""1D Sinusoidal Position Embedding Initializer.
Args:
max_len: maximum possible length for the input.
min_scale: float: minimum frequency-scale in sine grating.
max_scale: float: maximum frequency-scale in sine grating.
Returns:
output: init function returning `(1, max_len, d_feature)`
"""
def init(key, shape, dtype=np.float32):
"""Sinusoidal init."""
del key, dtype
d_feature = shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
pe[:, :d_feature // 2] = np.sin(position * div_term)
pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe)
return init
| TransformerConfig |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 4935,
"end": 5042
} | class ____(TypedDict):
targetIdentifier: int
targetType: str
| MonitorAlertRuleTargetSerializerResponse |
python | coleifer__peewee | tests/reflection.py | {
"start": 22732,
"end": 23431
} | class ____(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
| TestInteractiveHelpers |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 32431,
"end": 33229
} | class ____(PrefectBaseModel):
"""Filter by `ArtifactCollection.key`."""
any_: Optional[List[str]] = Field(
default=None, description="A list of artifact keys to include"
)
like_: Optional[str] = Field(
default=None,
description=(
"A string to match artifact keys against. This can include "
"SQL wildcard characters like `%` and `_`."
),
examples=["my-artifact-%"],
)
exists_: Optional[bool] = Field(
default=None,
description=(
"If `true`, only include artifacts with a non-null key. If `false`, "
"only include artifacts with a null key. Should return all rows in "
"the ArtifactCollection table if specified."
),
)
| ArtifactCollectionFilterKey |
python | huggingface__transformers | tests/models/switch_transformers/test_modeling_switch_transformers.py | {
"start": 21387,
"end": 28718
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(SwitchTransformersModel, SwitchTransformersForConditionalGeneration) if is_torch_available() else ()
)
pipeline_model_mapping = (
{
"feature-extraction": SwitchTransformersModel,
"summarization": SwitchTransformersForConditionalGeneration,
"text2text-generation": SwitchTransformersForConditionalGeneration,
"translation": SwitchTransformersForConditionalGeneration,
}
if is_torch_available()
else {}
)
test_resize_embeddings = True
is_encoder_decoder = True
# The small SWITCH_TRANSFORMERS model needs higher percentages for CPU/MP tests
model_split_percents = [0.5, 0.8, 0.9]
# `SwitchTransformers` is a MOE in which not all experts will get gradients because they are not all used in a single forward pass
test_all_params_have_gradient = False
test_head_masking = False
def setUp(self):
self.model_tester = SwitchTransformersModelTester(self)
self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_shift_right(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_v1_1(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
# check that gated gelu feed forward and different word embeddings work
config = config_and_inputs[0]
config.tie_word_embeddings = False
config.feed_forward_proj = "gated-gelu"
self.model_tester.create_and_check_model(config, *config_and_inputs[1:])
def test_config_and_model_silu_gated(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config = config_and_inputs[0]
config.feed_forward_proj = "gated-silu"
self.model_tester.create_and_check_model(*config_and_inputs)
def test_with_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_past_with_attn_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_decoder_model_past_with_3d_attn_mask(self):
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.model_tester.prepare_config_and_inputs()
attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],
vocab_size=2,
)
decoder_attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length],
vocab_size=2,
)
self.model_tester.create_and_check_decoder_model_attention_mask_past(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
# overwrite because T5 doesn't accept position ids as input and expects `decoder_input_ids`
def test_custom_4d_attention_mask(self):
for model_class in self.all_generative_model_classes:
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config).to(device=torch_device, dtype=torch.float32)
(
input_ids,
_,
input_ids_shared_prefix,
mask_shared_prefix,
_,
) = self._get_custom_4d_mask_test_data()
logits = model.forward(
decoder_input_ids=input_ids,
input_ids=input_dict["input_ids"][:3],
).logits
# logits.shape == torch.Size([3, 4, ...])
logits_shared_prefix = model(
input_ids=input_dict["input_ids"][:1],
decoder_input_ids=input_ids_shared_prefix,
decoder_attention_mask=mask_shared_prefix,
)[0]
# logits_shared_prefix.shape == torch.Size([1, 6, ...])
out_last_tokens = logits[:, -1, :] # last tokens in each batch line
out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens
# comparing softmax-normalized logits:
normalized_0 = F.softmax(out_last_tokens)
normalized_1 = F.softmax(out_shared_prefix_last_tokens)
torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_generate_with_past_key_values(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)
def test_encoder_decoder_shared_weights(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def test_v1_1_resize_embeddings(self):
config = self.model_tester.prepare_config_and_inputs()[0]
self.model_tester.check_resize_embeddings_switch_transformers_v1_1(config)
@slow
def test_model_from_pretrained(self):
model_name = "google/switch-base-8"
model = SwitchTransformersModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
@unittest.skip("TODO ARTHUR later on this will be fixed with t5 modular refactor")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="SwitchTransformers has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
| SwitchTransformersModelTest |
python | pytorch__pytorch | test/test_fx.py | {
"start": 3031,
"end": 4585
} | class ____(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple("Point", ["x", "y"])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap("a_lifted_leaf")
# Test wrapping twice doesn't break anything
wrap("a_lifted_leaf")
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap("len")
wrap("getattr")
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap("wrapped_with_submodule")
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
def my_decorator(f):
@functools.wraps(f)
def wrapper_inside_decorator(*args, **kwargs):
return f(*args, **kwargs)
return wrapper_inside_decorator
@wrap
@my_decorator
def wrapped_decorated_fn(x):
return x
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap("wrapper_fn")
def wrapper_fn(x):
return torch.foo(x)
| SimpleTest |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/agave/tests.py | {
"start": 238,
"end": 981
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = AgaveProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"status": "success",
"message": "User details retrieved successfully.",
"version": "2.0.0-SNAPSHOT-rc3fad",
"result": {
"first_name": "John",
"last_name": "Doe",
"full_name": "John Doe",
"email": "jon@doe.edu",
"phone": "",
"mobile_phone": "",
"status": "Active",
"create_time": "20180322043812Z",
"username": "jdoe"
}
}
""",
)
def get_expected_to_str(self):
return "jdoe"
| AgaveTests |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/console/kernelapp.py | {
"start": 1413,
"end": 2363
} | class ____(IPKernelApp):
outstream_class = DottedObjectName(
'spyder_kernels.console.outstream.TTYOutStream'
)
kernel_class = SpyderKernel
def init_pdb(self):
"""
This method was added in IPykernel 5.3.1 and it replaces
the debugger used by the kernel with a new class
introduced in IPython 7.15 during kernel's initialization.
Therefore, it doesn't allow us to use our debugger.
"""
pass
def close(self):
"""Close the loopback socket."""
socket = self.kernel.loopback_socket
if socket and not socket.closed:
socket.close()
return super().close()
def init_poller(self):
"""User our own poller."""
# The SPY_PARENT_PID env var must be set on the Spyder side.
parent_pid = int(os.environ.get("SPY_PARENT_PID") or 0)
self.poller = SpyderParentPoller(parent_pid=parent_pid)
| SpyderKernelApp |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_unassigned_email.py | {
"start": 217,
"end": 412
} | class ____(ActivityMailDebugView):
def get_activity(self, request: HttpRequest, event):
return {"type": ActivityType.UNASSIGNED.value, "user_id": request.user.id}
| DebugUnassignedEmailView |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 51922,
"end": 56855
} | class ____:
def test_primary_email_change_email(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id", email="new_email@example.com", username="username", name=""
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_primary_email_change_email(
pyramid_request,
(stub_user, pretend.stub(email="old_email@example.com", verified=True)),
)
assert result == {
"username": stub_user.username,
"old_email": "old_email@example.com",
"new_email": stub_user.email,
}
subject_renderer.assert_()
body_renderer.assert_(username=stub_user.username)
html_renderer.assert_(username=stub_user.username)
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
"username <old_email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "old_email@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
def test_primary_email_change_email_unverified(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id", email="new_email@example.com", username="username", name=""
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/primary-email-change/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_primary_email_change_email(
pyramid_request,
(stub_user, pretend.stub(email="old_email@example.com", verified=False)),
)
assert result == {
"username": stub_user.username,
"old_email": "old_email@example.com",
"new_email": stub_user.email,
}
subject_renderer.assert_()
body_renderer.assert_(username=stub_user.username)
html_renderer.assert_(username=stub_user.username)
assert pyramid_request.task.calls == []
assert send_email.delay.calls == []
| TestPrimaryEmailChangeEmail |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 26357,
"end": 26489
} | class ____(ResolverAltSetUp, ResolverDomainTests):
pass
@override_settings(PUBLIC_DOMAIN="readthedocs.org")
| ResolverDomainTestsAlt |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-duckdb/llama_index/vector_stores/duckdb/base.py | {
"start": 2104,
"end": 22660
} | class ____(BasePydanticVectorStore):
"""
DuckDB vector store.
In this vector store, embeddings are stored within a DuckDB database.
During query time, the index uses DuckDB to query for the top
k most similar nodes.
Examples:
`pip install llama-index-vector-stores-duckdb`
```python
from llama_index.vector_stores.duckdb import DuckDBVectorStore
# in-memory
vector_store = DuckDBVectorStore()
# persist to disk
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
```
"""
stores_text: bool = True
flat_metadata: bool = True
database_name: str
table_name: str
# schema_name: Optional[str] # TODO: support schema name
embed_dim: Optional[int]
# hybrid_search: Optional[bool] # TODO: support hybrid search
text_search_config: Optional[dict]
persist_dir: str
_shared_conn: Optional[duckdb.DuckDBPyConnection] = PrivateAttr(default=None)
_thread_local: threading.local = PrivateAttr(default_factory=threading.local)
_is_initialized: bool = PrivateAttr(default=False)
_database_path: Optional[str] = PrivateAttr()
def __init__(
self,
database_name: str = ":memory:",
table_name: str = "documents",
embed_dim: Optional[int] = None,
# https://duckdb.org/docs/extensions/full_text_search
text_search_config: Optional[dict] = None,
persist_dir: str = "./storage",
client: Optional[duckdb.DuckDBPyConnection] = None,
**kwargs: Any, # noqa: ARG002
) -> None:
"""Init params."""
if text_search_config is None:
text_search_config = DEFAULT_TEXT_SEARCH_CONFIG
fields = {
"database_name": database_name,
"table_name": table_name,
"embed_dim": embed_dim,
"text_search_config": text_search_config,
"persist_dir": persist_dir,
}
if client is not None:
self._shared_conn = client
super().__init__(stores_text=True, **fields)
_ = self._initialize_table(self.client, self.table_name, self.embed_dim)
@classmethod
def from_local(
cls,
database_path: str,
table_name: str = "documents",
# schema_name: Optional[str] = "main",
embed_dim: Optional[int] = None,
# hybrid_search: Optional[bool] = False,
text_search_config: Optional[dict] = None,
**kwargs: Any,
) -> "DuckDBVectorStore":
"""Load a DuckDB vector store from a local file."""
db_path = Path(database_path)
return cls(
database_name=db_path.name,
table_name=table_name,
embed_dim=embed_dim,
text_search_config=text_search_config,
persist_dir=str(db_path.parent),
**kwargs,
)
@classmethod
def from_params(
cls,
database_name: str = ":memory:",
table_name: str = "documents",
# schema_name: Optional[str] = "main",
embed_dim: Optional[int] = None,
# hybrid_search: Optional[bool] = False,
text_search_config: Optional[dict] = None,
persist_dir: str = "./storage",
**kwargs: Any,
) -> "DuckDBVectorStore":
return cls(
database_name=database_name,
table_name=table_name,
# schema_name=schema_name,
embed_dim=embed_dim,
# hybrid_search=hybrid_search,
text_search_config=text_search_config,
persist_dir=persist_dir,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "DuckDBVectorStore"
@property
def client(self) -> duckdb.DuckDBPyConnection:
"""Return client."""
if self._shared_conn is None:
self._shared_conn = self._connect(self.database_name, self.persist_dir)
if not hasattr(self._thread_local, "conn") or self._thread_local.conn is None:
self._thread_local.conn = self._shared_conn.cursor()
return self._thread_local.conn
@classmethod
def _connect(
cls, database_name: str, persist_dir: str
) -> duckdb.DuckDBPyConnection:
"""Connect to the DuckDB database -- create the data persistence directory if it doesn't exist."""
database_connection = database_name
if database_name != ":memory:":
persist_path = Path(persist_dir)
if not persist_path.exists():
persist_path.mkdir(parents=True, exist_ok=True)
database_connection = str(persist_path / database_name)
return duckdb.connect(database_connection)
@property
def table(self) -> duckdb.DuckDBPyRelation:
"""Return the table for the connection to the DuckDB database."""
return self.client.table(self.table_name)
@classmethod
def _get_embedding_type(cls, embed_dim: Optional[int]) -> str:
return f"FLOAT[{embed_dim}]" if embed_dim is not None else "FLOAT[]"
@classmethod
def _initialize_table(
cls, conn: duckdb.DuckDBPyConnection, table_name: str, embed_dim: Optional[int]
) -> None:
"""Initialize the DuckDB Database, extensions, and documents table."""
home_dir = Path.home()
conn.execute(f"SET home_directory='{home_dir}';")
conn.install_extension("json")
conn.load_extension("json")
conn.install_extension("fts")
conn.load_extension("fts")
embedding_type = cls._get_embedding_type(embed_dim)
conn.begin().execute(f"""
CREATE TABLE IF NOT EXISTS {table_name} (
node_id VARCHAR PRIMARY KEY,
text TEXT,
embedding {embedding_type},
metadata_ JSON
);
""").commit()
table = conn.table(table_name)
required_columns = ["node_id", "text", "embedding", "metadata_"]
table_columns = table.describe().columns
for column in required_columns:
if column not in table_columns:
raise DuckDBTableIncorrectColumnsError(
table_name, required_columns, table_columns
)
def _node_to_arrow_row(self, node: BaseNode) -> dict:
return {
"node_id": node.node_id,
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"embedding": node.get_embedding(),
"metadata_": node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
),
}
def _arrow_row_to_node(self, row_dict: dict) -> BaseNode:
node = metadata_dict_to_node(
metadata=json.loads(row_dict["metadata_"]), text=row_dict["text"]
)
node.embedding = row_dict["embedding"]
return node
def _arrow_row_to_query_result(self, rows: list[dict]) -> VectorStoreQueryResult:
nodes = []
similarities = []
ids = []
for row in rows:
node = self._arrow_row_to_node(row)
nodes.append(node)
ids.append(row["node_id"])
similarities.append(row["score"])
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
@override
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult: # noqa: ARG002
"""Query the vector store for top k most similar nodes."""
filter_expression = self._build_metadata_filter_expressions(
metadata_filters=query.filters
)
inner_query = self.table.select(
StarExpression(),
FunctionExpression(
"array_cosine_similarity"
if self.embed_dim is not None
else "list_cosine_similarity",
ColumnExpression("embedding"),
ConstantExpression(query.query_embedding).cast(
self._get_embedding_type(self.embed_dim)
),
).alias("score"),
).filter(filter_expression)
outer_query = (
inner_query.select(
ColumnExpression("node_id"),
ColumnExpression("text"),
ColumnExpression("embedding"),
ColumnExpression("metadata_"),
ColumnExpression("score"),
)
.filter(
ColumnExpression("score").isnotnull(),
)
.sort(
ColumnExpression("score").desc(),
)
.limit(
query.similarity_top_k,
)
)
command = outer_query.sql_query()
rows = self.client.execute(command).arrow().to_pylist()
return self._arrow_row_to_query_result(rows)
@override
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult: # noqa: ARG002
"""Query the vector store for top k most similar nodes."""
return await asyncio.to_thread(self.query, query, **kwargs)
@override
def add(self, nodes: Sequence[BaseNode], **add_kwargs: Any) -> list[str]: # noqa: ARG002
"""Add nodes to the vector store."""
rows: list[dict[str, Any]] = [self._node_to_arrow_row(node) for node in nodes]
arrow_table = pyarrow.Table.from_pylist(rows)
self.client.from_arrow(arrow_table).insert_into(self.table.alias)
return [node.node_id for node in nodes]
@override
async def async_add(
self, nodes: Sequence[BaseNode], **add_kwargs: Any
) -> list[str]: # noqa: ARG002
"""Add nodes to the vector store."""
return await asyncio.to_thread(self.add, nodes, **add_kwargs)
@override
def get_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**get_kwargs: Any,
) -> list[BaseNode]: # noqa: ARG002
"""Get nodes using node_ids and/or filters. If both are provided, both are considered."""
filter_expression = self._build_node_id_metadata_filter_expression(
node_ids=node_ids,
filters=filters,
)
command = self.table.filter(filter_expression).sql_query()
rows = self.client.execute(command).arrow().to_pylist()
return [self._arrow_row_to_node(row) for row in rows]
@override
async def aget_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**get_kwargs: Any,
) -> list[BaseNode]: # noqa: ARG002
"""Get nodes using node_ids and/or filters. If both are provided, both are considered."""
return await asyncio.to_thread(self.get_nodes, node_ids, filters, **get_kwargs)
@override
def delete_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None: # noqa: ARG002
"""Delete nodes using node_ids and/or filters. If both are provided, both are considered."""
filter_expression = self._build_node_id_metadata_filter_expression(
node_ids=node_ids,
filters=filters,
)
command = f"DELETE FROM {self.table.alias} WHERE {filter_expression}"
self.client.execute(command)
@override
async def adelete_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None: # noqa: ARG002
"""Delete nodes using node_ids and/or filters. If both are provided, both are considered."""
return await asyncio.to_thread(
self.delete_nodes, node_ids, filters, **delete_kwargs
)
@override
def clear(self, **clear_kwargs: Any) -> None: # noqa: ARG002
"""Clear the vector store."""
command = f"DELETE FROM {self.table.alias}"
self.client.execute(command)
@override
async def aclear(self, **clear_kwargs: Any) -> None: # noqa: ARG002
"""Clear the vector store."""
return await asyncio.to_thread(self.clear, **clear_kwargs)
@override
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: # noqa: ARG002
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
where_clause = self._build_metadata_filter_expression(
"ref_doc_id", ref_doc_id, FilterOperator.EQ
)
command = f"DELETE FROM {self.table.alias} WHERE {where_clause}"
self.client.execute(command)
@override
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: # noqa: ARG002
"""
Delete nodes using with ref_doc_id.
"""
return await asyncio.to_thread(self.delete, ref_doc_id, **delete_kwargs)
def _build_node_id_metadata_filter_expression(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> Expression:
filter_expression = Expression(True)
if filters is not None:
filter_expression = self._build_metadata_filter_expressions(
metadata_filters=filters
)
if node_ids is not None:
node_id_expression = FunctionExpression(
"list_contains",
ConstantExpression(node_ids),
ColumnExpression("node_id"),
)
filter_expression = filter_expression.__and__(node_id_expression)
return filter_expression
def _build_metadata_filter_expression(
self, key: str, value: Any, operator: FilterOperator
) -> Expression:
metadata_column = ColumnExpression(f"metadata_.{key}")
sample_value = value[0] if isinstance(value, list) else value
value_type = filter_value_type_to_duckdb_type.get(type(sample_value))
metadata_type_expression = FunctionExpression(
"json_type",
ColumnExpression("metadata_"),
ConstantExpression(f"$.{key}"),
)
if value_type is None:
# If the value is a JSON Null, we want to swap the 'Null' for an actual null
metadata_column = CaseExpression(
condition=metadata_type_expression.__eq__(ConstantExpression("NULL")),
value=ConstantExpression(None),
).otherwise(metadata_column)
if value_type == VARCHAR:
# If the value is a string, it means the column is a JSON string
# and so we need to unpack it otherwise we'll get back a JSON string (a string wrapped in quotes)
# https://github.com/duckdb/duckdb/issues/17681
metadata_column = FunctionExpression(
"json_extract_string",
ColumnExpression("metadata_"),
ConstantExpression(f"$.{key}"),
)
metadata_value = ConstantExpression(value)
return self._build_filter_expression(metadata_column, metadata_value, operator)
def _build_filter_expression(
self, column: Expression, value: Expression, operator: FilterOperator
) -> Expression:
"""
Build a filter expression for a given column and value.
Args:
column: The key in the document to use in the filter.
value: The value to use in the filter.
operator: The filter operator to use.
"""
if operator_func := li_filter_to_py_operator.get(operator):
# We have a straightforward operator, and DuckDB can handle just take the Python operator
# i.e. FilterOperator.EQ -> `==` (operator.eq)
# i.e. FilterOperator.GTE -> `>=` (operator.ge)
# ...
return operator_func(column, value)
if operator == FilterOperator.IN:
# Given a list of values, check to see if the document's value is in the list
return FunctionExpression(
"list_contains", # list_contains(list_to_look_in, element_to_find)
value,
column,
)
if operator == FilterOperator.NIN:
# Given a list of values, check to see if the document's value is not in the list
return FunctionExpression(
"list_contains", # list_contains(list_to_look_in, element_to_find)
value,
column,
).__eq__(ConstantExpression(False))
if operator == FilterOperator.CONTAINS:
# filter_value is in the document value
# This will never be true so long as the DuckDB vector store
# requires flat metadata
return Expression(False)
# return FunctionExpression(
# "list_contains", # list_contains(list_to_look_in, element_to_find)
# value,
# column,
# )
if operator == FilterOperator.ANY:
# Check if the intersection of the two lists has at least one element
return FunctionExpression(
"list_has_any",
column,
value,
)
if operator == FilterOperator.ALL:
# Check if all of the provided values are in the document's value
return FunctionExpression(
"list_has_all", # list_has_all(list, sub-list)
column,
value,
)
if operator == FilterOperator.TEXT_MATCH:
return FunctionExpression(
"contains",
column,
value,
)
if operator == FilterOperator.TEXT_MATCH_INSENSITIVE:
return FunctionExpression(
"contains",
FunctionExpression(
"lower",
column,
),
FunctionExpression(
"lower",
value,
),
)
if operator == FilterOperator.IS_EMPTY:
# column is null or the array is empty
return column.isnull().__or__(
CaseExpression(
condition=FunctionExpression("typeof", column).__eq__(
ConstantExpression("ARRAY")
),
value=FunctionExpression("length", column).__eq__(
ConstantExpression(0)
),
)
)
raise NotImplementedError(f"Unsupported operator: {operator}")
def _build_metadata_filter_expressions(
self, metadata_filters: Optional[MetadataFilters] = None
) -> Expression:
expressions: list[Expression] = []
if metadata_filters is None or len(metadata_filters.filters) == 0:
return Expression(True)
for metadata_filter in metadata_filters.filters:
if isinstance(metadata_filter, MetadataFilter):
expressions.append(
self._build_metadata_filter_expression(
metadata_filter.key,
metadata_filter.value,
metadata_filter.operator,
)
)
elif isinstance(metadata_filter, MetadataFilters):
expressions.append(
self._build_metadata_filter_expressions(metadata_filter)
)
else:
raise NotImplementedError(
f"Unsupported metadata filter: {metadata_filter}"
)
final_expression: Expression = expressions[0]
for expression in expressions[1:]:
# We will do an implicit AND for NOT conditions
if metadata_filters.condition in [FilterCondition.AND, FilterCondition.NOT]:
final_expression = final_expression.__and__(expression)
continue
if metadata_filters.condition == FilterCondition.OR:
final_expression = final_expression.__or__(expression)
continue
raise NotImplementedError(
f"Unsupported condition: {metadata_filters.condition}"
)
if metadata_filters.condition == FilterCondition.NOT:
final_expression = final_expression.__invert__()
return final_expression
| DuckDBVectorStore |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid/provider.py | {
"start": 720,
"end": 2966
} | class ____(Provider):
id = "openid"
name = "OpenID"
account_class = OpenIDAccount
uses_apps = False
def get_login_url(self, request, **kwargs):
url = reverse("openid_login")
if kwargs:
url += "?" + urlencode(kwargs)
return url
def get_brands(self):
default_servers = []
return self.get_settings().get("SERVERS", default_servers)
def get_server_settings(self, endpoint):
servers = self.get_settings().get("SERVERS", [])
for server in servers:
if endpoint is not None and endpoint.startswith(server.get("openid_url")):
return server
return {}
def extract_extra_data(self, response):
extra_data = {}
server_settings = self.get_server_settings(response.endpoint.server_url)
extra_attributes = server_settings.get("extra_attributes", [])
for attribute_id, name, _ in extra_attributes:
extra_data[attribute_id] = get_value_from_response(
response, ax_names=[name]
)
return extra_data
def extract_uid(self, response):
return response.identity_url
def extract_common_fields(self, response):
first_name = (
get_value_from_response(
response,
ax_names=[
AXAttribute.PERSON_FIRST_NAME,
OldAXAttribute.PERSON_FIRST_NAME,
],
)
or ""
)
last_name = (
get_value_from_response(
response,
ax_names=[
AXAttribute.PERSON_LAST_NAME,
OldAXAttribute.PERSON_LAST_NAME,
],
)
or ""
)
name = (
get_value_from_response(
response,
sreg_names=[SRegField.NAME],
ax_names=[AXAttribute.PERSON_NAME, OldAXAttribute.PERSON_NAME],
)
or ""
)
return dict(
email=get_email_from_response(response),
first_name=first_name,
last_name=last_name,
name=name,
)
provider_classes = [OpenIDProvider]
| OpenIDProvider |
python | getsentry__sentry | src/sentry/analytics/events/first_event_sent.py | {
"start": 445,
"end": 671
} | class ____(FirstEventSentEvent):
sdk_name: str | None = None
# first error with minified stack trace for a project
@analytics.eventclass("first_event_with_minified_stack_trace_for_project.sent")
| FirstEventSentForProjectEvent |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 75943,
"end": 77250
} | class ____(Response):
"""
Response of queues.move_task_backward endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_backward"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"position": {
"description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskBackwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property("position")
def position(self) -> Optional[int]:
return self._property_position
@position.setter
def position(self, value: Optional[int]) -> None:
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
| MoveTaskBackwardResponse |
python | chroma-core__chroma | chromadb/utils/embedding_functions/open_clip_embedding_function.py | {
"start": 395,
"end": 6070
} | class ____(EmbeddingFunction[Embeddable]):
"""
This class is used to generate embeddings for a list of texts or images using the Open CLIP model.
"""
def __init__(
self,
model_name: str = "ViT-B-32",
checkpoint: str = "laion2b_s34b_b79k",
device: Optional[str] = "cpu",
) -> None:
"""
Initialize the OpenCLIPEmbeddingFunction.
Args:
model_name (str, optional): The name of the model to use for embeddings.
Defaults to "ViT-B-32".
checkpoint (str, optional): The checkpoint to use for the model.
Defaults to "laion2b_s34b_b79k".
device (str, optional): The device to use for computation.
Defaults to "cpu".
"""
try:
import open_clip
except ImportError:
raise ValueError(
"The open_clip python package is not installed. Please install it with `pip install open-clip-torch`. https://github.com/mlfoundations/open_clip"
)
try:
self._torch = importlib.import_module("torch")
except ImportError:
raise ValueError(
"The torch python package is not installed. Please install it with `pip install torch`"
)
try:
self._PILImage = importlib.import_module("PIL.Image")
except ImportError:
raise ValueError(
"The PIL python package is not installed. Please install it with `pip install pillow`"
)
self.model_name = model_name
self.checkpoint = checkpoint
self.device = device
model, _, preprocess = open_clip.create_model_and_transforms(
model_name=model_name, pretrained=checkpoint
)
self._model = model
self._model.to(device)
self._preprocess = preprocess
self._tokenizer = open_clip.get_tokenizer(model_name=model_name)
def _encode_image(self, image: Image) -> Embedding:
"""
Encode an image using the Open CLIP model.
Args:
image: The image to encode.
Returns:
The embedding for the image.
"""
pil_image = self._PILImage.fromarray(image)
with self._torch.no_grad():
image_features = self._model.encode_image(
self._preprocess(pil_image).unsqueeze(0).to(self.device)
)
image_features /= image_features.norm(dim=-1, keepdim=True)
return cast(Embedding, image_features.squeeze().cpu().numpy())
def _encode_text(self, text: Document) -> Embedding:
"""
Encode a text using the Open CLIP model.
Args:
text: The text to encode.
Returns:
The embedding for the text.
"""
with self._torch.no_grad():
text_features = self._model.encode_text(
self._tokenizer(text).to(self.device)
)
text_features /= text_features.norm(dim=-1, keepdim=True)
return cast(Embedding, text_features.squeeze().cpu().numpy())
def __call__(self, input: Embeddable) -> Embeddings:
"""
Generate embeddings for the given documents or images.
Args:
input: Documents or images to generate embeddings for.
Returns:
Embeddings for the documents or images.
"""
embeddings: Embeddings = []
for item in input:
if is_image(item):
embeddings.append(
np.array(self._encode_image(cast(Image, item)), dtype=np.float32)
)
elif is_document(item):
embeddings.append(
np.array(self._encode_text(cast(Document, item)), dtype=np.float32)
)
return embeddings
@staticmethod
def name() -> str:
return "open_clip"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "EmbeddingFunction[Union[Documents, Images]]":
model_name = config.get("model_name")
checkpoint = config.get("checkpoint")
device = config.get("device")
if model_name is None or checkpoint is None or device is None:
assert False, "This code should not be reached"
return OpenCLIPEmbeddingFunction(
model_name=model_name, checkpoint=checkpoint, device=device
)
def get_config(self) -> Dict[str, Any]:
return {
"model_name": self.model_name,
"checkpoint": self.checkpoint,
"device": self.device,
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "model_name" in new_config:
raise ValueError(
"The model name cannot be changed after the embedding function has been initialized."
)
if "checkpoint" in new_config:
raise ValueError(
"The checkpoint cannot be changed after the embedding function has been initialized."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "open_clip")
| OpenCLIPEmbeddingFunction |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_mp_strategy_test.py | {
"start": 1683,
"end": 6957
} | class ____(
tpu_embedding_base_test.TPUEmbeddingBaseTest
):
def setUp(self):
super().setUp()
self._num_replicas = 1
self._num_cores_per_replica = 2
def _get_strategy(self) -> tpu_strategy.TPUStrategy:
topology = self._init_tpu_system()
d_assign = device_lib.device_assignment(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=1,
)
self.strategy = tpu_strategy.TPUStrategyV2(
self.resolver,
experimental_device_assignment=d_assign,
experimental_spmd_xla_partitioning=True,
)
self.embedding_devices = sum(
(list(replica) for replica in self.strategy.extended._tpu_devices), []
)
return self.strategy
def enqueue(self, inp, mid_level_api, use_device, training):
if use_device:
for emb, device in zip(inp, self.embedding_devices):
mid_level_api.enqueue(emb, device=device, training=training)
else:
mid_level_api.enqueue(inp[0], training=training)
@parameterized.parameters(False, True)
def test_spmd_training(self, use_device):
num_steps = 10
num_steps_float = float(num_steps)
starting_lr = 1.0
ending_lr = 0.5
strategy = self._get_strategy()
# Create model with Keras.
with strategy.scope():
step_counter = tf_variables.Variable(0.0, dtypes.float32)
def lr_function():
return gen_math_ops.maximum(
ending_lr,
starting_lr
+ ((ending_lr - starting_lr) * step_counter) / num_steps_float,
)
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=lr_function)
table_config = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=2,
dim=4,
initializer=init_ops_v2.Constant(np.zeros((2, 4))),
combiner='sum',
name='table',
)
mid_level_api = tpu_embedding_v2.TPUEmbedding(
feature_config={
'feature': tpu_embedding_v2_utils.FeatureConfig(
table=table_config, name='feature'
)
},
optimizer=optimizer,
)
def input_fn(ctx):
del ctx
feature = {
'feature': constant_op.constant(
[0, 1], shape=(2, 1), dtype=dtypes.int32
)
}
return dataset_ops.DatasetV2.from_tensors(feature).repeat()
def create_datasets():
"""Creates either a per-replica dataset, or multiple per-devices ones.
This function explicitly creates per-device datasets because the strategy
does not produce a distributed dataset in the model-parallel case; there
is only one replica. Without this consideration, the embeddings would be
read as [0, 0] instead of the expected [0, 1] since all the devices would
receive the same value.
Returns:
A list of one or more dataset(s).
"""
if use_device:
datasets = []
for i in range(len(self.embedding_devices)):
datasets.append(
dataset_ops.DatasetV2.from_tensor_slices(
{'feature': [[[i % self._num_cores_per_replica]]]}
).repeat()
)
return datasets
else:
dataset = strategy.distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False
),
)
return [dataset]
datasets = create_datasets()
iterators = [iter(ds) for ds in datasets]
@def_function.function(jit_compile=True)
def test_fn():
def step():
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
result = math_ops.reduce_sum(activations['feature'])
loss = result / self._num_replicas
grads = tape.gradient(loss, activations)
mid_level_api.apply_gradients(grads)
return activations
inp = [next(it) for it in iterators]
self.enqueue(inp, mid_level_api, use_device, training=True)
return strategy.run(step)
# Run model.
results = []
for _ in range(num_steps):
result = test_fn()
results.append(self._unpack(strategy, result['feature']))
step_counter.assign_add(1.0)
# Table is 2 elements wide, per-replica batch size of 1, with id 0.
# Loss for the gradient is the sum of the entries divided by the number of
# replicas. Thus the per replica gradient is 1/#of replicas for row 0 and no
# other updates. The reduced gradient is therefore 1.
# Learning rate schedule over num_steps steps:
# 1.0 0.95 0.9 0.85 0.8 ...
# Since use SGD and the gradient is one, the first row of the table is
# [0, 0] [-1.0, -1.0] [-1.95, -1.95] [-2.85, -2.85] ... (the negative
# partial sums of the above).
learning_rates = [starting_lr - (starting_lr - ending_lr) / num_steps * j
for j in range(num_steps)]
cumsum = [sum(learning_rates[0:j]) for j in range(num_steps)]
goldens = [[[-cumsum[i]] * table_config.dim] * self._num_cores_per_replica
for i in range(10)]
self.assertAllClose(results, goldens)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingTPUStrategyV2Test |
python | spack__spack | lib/spack/spack/vendor/attr/exceptions.py | {
"start": 661,
"end": 834
} | class ____(ValueError):
"""
An ``attrs`` function couldn't find an attribute that the user asked for.
.. versionadded:: 16.2.0
"""
| AttrsAttributeNotFoundError |
python | kamyu104__LeetCode-Solutions | Python/next-greater-node-in-linked-list.py | {
"start": 165,
"end": 579
} | class ____(object):
def nextLargerNodes(self, head):
"""
:type head: ListNode
:rtype: List[int]
"""
result, stk = [], []
while head:
while stk and stk[-1][1] < head.val:
result[stk.pop()[0]] = head.val
stk.append([len(result), head.val])
result.append(0)
head = head.next
return result
| Solution |
python | pydantic__pydantic | pydantic/types.py | {
"start": 35822,
"end": 36082
} | class ____(BaseModel):
uuid4: UUID4
Model(uuid4=uuid.uuid4())
```
"""
UUID5 = Annotated[UUID, UuidVersion(5)]
"""A [UUID](https://docs.python.org/3/library/uuid.html) that must be version 5.
```python
import uuid
from pydantic import UUID5, BaseModel
| Model |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py | {
"start": 2323,
"end": 2501
} | class ____:
@check_permission(Permissions.LAUNCH_PIPELINE_REEXECUTION)
def mutate(self, graphene_info: ResolveInfo, **_kwargs):
pass
| FakeOtherEnumPermissionMutation |
python | ray-project__ray | python/ray/dag/tests/experimental/test_execution_schedule.py | {
"start": 2733,
"end": 16147
} | class ____:
"""
Test whether `_select_next_nodes` function selects the next nodes for
topological sort to generate execution schedule correctly.
task_idx: Each DAG node has a unique global index.
exec_task_idx: The DAG node's index in the actor's `executable_tasks` list.
"""
def test_two_candidates_on_same_actor(self, monkeypatch):
"""
Simulate the case where there are two candidates on the same actor.
The candidate with the smaller index in the `executable_tasks` list
should be selected.
driver -> fake_actor.op -> fake_actor.op -> driver
In the example above, both READ operations on the fake_actor have zero
in-degree. The operation with the smaller index in the executable_tasks
list should be selected first; therefore, the one on the left side will
be selected first.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor = ActorHandle("fake_actor")
# The DAG node has a global index of 1, and its index in the
# actor's `executable_tasks` list is 0.
task_idx_1 = 1
dag_node_1 = _DAGOperationGraphNode(
_DAGNodeOperation(0, _DAGNodeOperationType.READ),
task_idx_1,
fake_actor,
False,
)
# The DAG node has a global index of 2, and its index in the
# actor's `executable_tasks` list is 1.
task_idx_2 = 2
dag_node_2 = _DAGOperationGraphNode(
_DAGNodeOperation(1, _DAGNodeOperationType.READ),
task_idx_2,
fake_actor,
False,
)
mock_actor_to_candidates = {
fake_actor: [
dag_node_1,
dag_node_2,
],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, None)
assert len(next_nodes) == 1
assert next_nodes[0] == dag_node_1
def test_only_one_nccl_write(self, monkeypatch):
"""
Simulate the case where there is only one candidate which is a NCCL
WRITE operation. In this case, `_select_next_nodes` should return both
the NCCL WRITE operation and the corresponding READ operation.
driver -> fake_actor_1.op -> fake_actor_2.op -> driver
In the example above, communication between fake_actor_1 and fake_actor_2
is done using NCCL. The following test case simulates a scenario where the
READ and COMPUTE operations on fake_actor_1 have already been added to the
execution schedule.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1, task_idx_1, exec_task_idx_1 = ActorHandle("fake_actor_1"), 1, 0
fake_actor_2, task_idx_2, exec_task_idx_2 = ActorHandle("fake_actor_2"), 2, 0
mock_graph = {
task_idx_1: generate_dag_graph_nodes(
exec_task_idx_1,
task_idx_1,
fake_actor_1,
requires_nccl_write=True,
),
task_idx_2: generate_dag_graph_nodes(
exec_task_idx_2,
task_idx_2,
fake_actor_2,
requires_nccl_read=True,
),
}
del mock_graph[task_idx_1][_DAGNodeOperationType.READ]
del mock_graph[task_idx_1][_DAGNodeOperationType.COMPUTE]
_add_edge(
mock_graph[task_idx_1][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_2][_DAGNodeOperationType.READ],
)
_add_edge(
mock_graph[task_idx_2][_DAGNodeOperationType.READ],
mock_graph[task_idx_2][_DAGNodeOperationType.COMPUTE],
)
_add_edge(
mock_graph[task_idx_2][_DAGNodeOperationType.COMPUTE],
mock_graph[task_idx_2][_DAGNodeOperationType.WRITE],
)
set_sync_idxs_p2p(mock_graph, task_idx_1, task_idx_2)
mock_actor_to_candidates = {
fake_actor_1: [mock_graph[task_idx_1][_DAGNodeOperationType.WRITE]],
fake_actor_2: [mock_graph[task_idx_2][_DAGNodeOperationType.READ]],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, mock_graph)
assert next_nodes == [
mock_graph[task_idx_1][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_2][_DAGNodeOperationType.READ],
]
def test_two_nccl_writes(self, monkeypatch):
"""
Simulate a scenario where there are two candidates that are NCCL WRITE
operations. In this case, _select_next_nodes can choose either of the
two NCCL WRITE operations and their corresponding READ operations.
driver -> fake_actor_1.op -> fake_actor_2.op -> driver
| |
-> fake_actor_2.op -> fake_actor_1.op -
In the example above, communication between fake_actor_1 and fake_actor_2 is
done using NCCL. The following test case simulates a scenario where the READ
and COMPUTE operations on both the DAG nodes with smaller bind_index on
fake_actor_1 and fake_actor_2 have already been added to the execution schedule.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1 = ActorHandle("fake_actor_1")
task_idx_1_0, exec_task_idx_1_0 = 1, 0
task_idx_1_1, exec_task_idx_1_1 = 3, 1
fake_actor_2 = ActorHandle("fake_actor_2")
task_idx_2_0, exec_task_idx_2_0 = 2, 0
task_idx_2_1, exec_task_idx_2_1 = 4, 1
# Run the test 10 times to ensure that the result of `_select_next_nodes`
# is deterministic.
for _ in range(20):
mock_graph = {
task_idx_1_0: generate_dag_graph_nodes(
exec_task_idx_1_0,
task_idx_1_0,
fake_actor_1,
requires_nccl_write=True,
),
task_idx_1_1: generate_dag_graph_nodes(
exec_task_idx_1_1,
task_idx_1_1,
fake_actor_1,
requires_nccl_read=True,
),
task_idx_2_0: generate_dag_graph_nodes(
exec_task_idx_2_0,
task_idx_2_0,
fake_actor_2,
requires_nccl_write=True,
),
task_idx_2_1: generate_dag_graph_nodes(
exec_task_idx_2_1,
task_idx_2_1,
fake_actor_2,
requires_nccl_read=True,
),
}
del mock_graph[task_idx_1_0][_DAGNodeOperationType.READ]
del mock_graph[task_idx_1_0][_DAGNodeOperationType.COMPUTE]
del mock_graph[task_idx_2_0][_DAGNodeOperationType.READ]
del mock_graph[task_idx_2_0][_DAGNodeOperationType.COMPUTE]
_add_edge(
mock_graph[task_idx_1_0][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_2_1][_DAGNodeOperationType.READ],
)
_add_edge(
mock_graph[task_idx_2_0][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_1_1][_DAGNodeOperationType.READ],
)
_add_edge(
mock_graph[task_idx_2_1][_DAGNodeOperationType.READ],
mock_graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE],
)
_add_edge(
mock_graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE],
mock_graph[task_idx_2_1][_DAGNodeOperationType.WRITE],
)
_add_edge(
mock_graph[task_idx_1_1][_DAGNodeOperationType.READ],
mock_graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE],
)
_add_edge(
mock_graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE],
mock_graph[task_idx_1_1][_DAGNodeOperationType.WRITE],
)
set_sync_idxs_p2p(mock_graph, task_idx_1_0, task_idx_2_1)
set_sync_idxs_p2p(mock_graph, task_idx_2_0, task_idx_1_1)
mock_actor_to_candidates = {
fake_actor_1: [
mock_graph[task_idx_1_0][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_1_1][_DAGNodeOperationType.READ],
],
fake_actor_2: [
mock_graph[task_idx_2_0][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_2_1][_DAGNodeOperationType.READ],
],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, mock_graph)
assert next_nodes == [
mock_graph[task_idx_1_0][_DAGNodeOperationType.WRITE],
mock_graph[task_idx_2_1][_DAGNodeOperationType.READ],
]
def test_only_one_nccl_collective(self, monkeypatch):
"""
Simulate the case where there is only one candidate which is a NCCL
collective operation. In this case, `_select_next_nodes` should return
all the NCCL collective nodes.
driver -> fake_actor_1.allreduce_1 -> driver
| |
-> fake_actor_2.allreduce_1 ->
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1, dag_idx_1, local_idx_1 = ActorHandle("fake_actor_1"), 1, 0
fake_actor_2, dag_idx_2, local_idx_2 = ActorHandle("fake_actor_2"), 2, 0
mock_graph = {
dag_idx_1: generate_dag_graph_nodes(
local_idx_1,
dag_idx_1,
fake_actor_1,
requires_nccl_compute=True,
),
dag_idx_2: generate_dag_graph_nodes(
local_idx_2,
dag_idx_2,
fake_actor_2,
requires_nccl_compute=True,
),
}
set_sync_idxs_collective(mock_graph, [dag_idx_1, dag_idx_2])
mock_actor_to_candidates = {
fake_actor_1: [mock_graph[dag_idx_1][_DAGNodeOperationType.COMPUTE]],
fake_actor_2: [mock_graph[dag_idx_2][_DAGNodeOperationType.COMPUTE]],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, mock_graph)
assert set(next_nodes) == {
mock_graph[dag_idx_1][_DAGNodeOperationType.COMPUTE],
mock_graph[dag_idx_2][_DAGNodeOperationType.COMPUTE],
}
def test_two_nccl_collectives(self, monkeypatch):
"""
Simulate the case where there are two candidates that are NCCL collective
operations. In this case, `_select_next_nodes` should return all the NCCL
collective nodes that are bond earlier.
driver -> fake_actor_1.allreduce_1 -> driver
| |
-> fake_actor_2.allreduce_1 ->
| |
-> fake_actor_3.allreduce_2 ->
| |
-> fake_actor_4.allreduce_2 ->
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1, dag_idx_1, local_idx_1 = ActorHandle("fake_actor_1"), 1, 0
fake_actor_2, dag_idx_2, local_idx_2 = ActorHandle("fake_actor_2"), 2, 0
fake_actor_3, dag_idx_3, local_idx_3 = ActorHandle("fake_actor_3"), 3, 0
fake_actor_4, dag_idx_4, local_idx_4 = ActorHandle("fake_actor_4"), 4, 0
mock_graph = {
dag_idx_1: generate_dag_graph_nodes(
local_idx_1,
dag_idx_1,
fake_actor_1,
requires_nccl_compute=True,
),
dag_idx_2: generate_dag_graph_nodes(
local_idx_2,
dag_idx_2,
fake_actor_2,
requires_nccl_compute=True,
),
dag_idx_3: generate_dag_graph_nodes(
local_idx_3,
dag_idx_3,
fake_actor_3,
requires_nccl_compute=True,
),
dag_idx_4: generate_dag_graph_nodes(
local_idx_4,
dag_idx_4,
fake_actor_4,
requires_nccl_compute=True,
),
}
set_sync_idxs_collective(mock_graph, [dag_idx_1, dag_idx_2])
set_sync_idxs_collective(mock_graph, [dag_idx_3, dag_idx_4])
mock_actor_to_candidates = {
fake_actor_1: [mock_graph[dag_idx_1][_DAGNodeOperationType.COMPUTE]],
fake_actor_2: [mock_graph[dag_idx_2][_DAGNodeOperationType.COMPUTE]],
fake_actor_3: [mock_graph[dag_idx_3][_DAGNodeOperationType.COMPUTE]],
fake_actor_4: [mock_graph[dag_idx_4][_DAGNodeOperationType.COMPUTE]],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, mock_graph)
assert set(next_nodes) == {
mock_graph[dag_idx_1][_DAGNodeOperationType.COMPUTE],
mock_graph[dag_idx_2][_DAGNodeOperationType.COMPUTE],
}
next_nodes = _select_next_nodes(mock_actor_to_candidates, mock_graph)
assert set(next_nodes) == {
mock_graph[dag_idx_3][_DAGNodeOperationType.COMPUTE],
mock_graph[dag_idx_4][_DAGNodeOperationType.COMPUTE],
}
| TestSelectNextNodes |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_unary_operand_type.py | {
"start": 1595,
"end": 1765
} | class ____:
def __init__(self):
"""https://github.com/pylint-dev/pylint/issues/8554"""
if not isinstance(super(), float):
pass
| NoArgumentSuper |
python | ansible__ansible | lib/ansible/parsing/vault/__init__.py | {
"start": 44911,
"end": 50844
} | class ____:
"""
Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
Keys are derived using PBKDF2
"""
# http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
# Note: strings in this class should be byte strings by default.
def __init__(self):
if not HAS_CRYPTOGRAPHY:
raise AnsibleError(NEED_CRYPTO_LIBRARY)
@staticmethod
def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=2 * key_length + iv_length,
salt=b_salt,
iterations=10000,
backend=CRYPTOGRAPHY_BACKEND)
b_derivedkey = kdf.derive(b_password)
return b_derivedkey
@classmethod
@functools.cache # Concurrent first-use by multiple threads will all execute the method body.
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
key_length = 32
if HAS_CRYPTOGRAPHY:
# AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
iv_length = algorithms.AES.block_size // 8
b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
b_key1 = b_derivedkey[:key_length]
b_key2 = b_derivedkey[key_length:(key_length * 2)]
return b_key1, b_key2, b_iv
@staticmethod
def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
encryptor = cipher.encryptor()
padder = padding.PKCS7(algorithms.AES.block_size).padder()
b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
b_ciphertext += encryptor.finalize()
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
b_hmac = hmac.finalize()
return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@classmethod
def _get_salt(cls):
custom_salt = C.config.get_config_value('VAULT_ENCRYPT_SALT')
if not custom_salt:
custom_salt = os.urandom(32)
return to_bytes(custom_salt)
@classmethod
def encrypt(cls, b_plaintext, secret, salt=None):
if secret is None:
raise AnsibleVaultError('The secret passed to encrypt() was None')
if salt is None:
b_salt = cls._get_salt()
elif not salt:
raise AnsibleVaultError('Empty or invalid salt passed to encrypt()')
else:
b_salt = to_bytes(salt)
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
# Unnecessary but getting rid of it is a backwards incompatible vault
# format change
b_vaulttext = hexlify(b_vaulttext)
return b_vaulttext
@classmethod
def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
try:
hmac.verify(_unhexlify(b_crypted_hmac))
except InvalidSignature as e:
raise AnsibleVaultError('HMAC verification failed: %s' % e)
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
decryptor = cipher.decryptor()
unpadder = padding.PKCS7(128).unpadder()
b_plaintext = unpadder.update(
decryptor.update(b_ciphertext) + decryptor.finalize()
) + unpadder.finalize()
return b_plaintext
@staticmethod
def _is_equal(b_a, b_b):
"""
Comparing 2 byte arrays in constant time to avoid timing attacks.
It would be nice if there were a library for this but hey.
"""
if not (isinstance(b_a, bytes) and isinstance(b_b, bytes)):
raise TypeError('_is_equal can only be used to compare two byte strings')
# http://codahale.com/a-lesson-in-timing-attacks/
if len(b_a) != len(b_b):
return False
result = 0
for b_x, b_y in zip(b_a, b_b):
result |= b_x ^ b_y
return result == 0
@classmethod
def decrypt(cls, b_vaulttext, secret):
b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
# TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
# (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
# though, likely needs to be python cryptography specific impl that basically
# creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
return b_plaintext
# Keys could be made bytes later if the code that gets the data is more
# naturally byte-oriented
CIPHER_MAPPING = {
u'AES256': VaultAES256,
}
| VaultAES256 |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 22243,
"end": 23653
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([CamembertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| CamembertEncoder |
python | django__django | tests/test_utils/test_testcase.py | {
"start": 5796,
"end": 6413
} | class ____(TestCase):
"""
In-memory data isolation is respected for model instances assigned to class
attributes during setUpTestData.
"""
@classmethod
def setUpTestData(cls):
cls.car = Car.objects.create(name="Volkswagen Beetle")
def test_book_name_deutsh(self):
self.assertEqual(self.car.name, "Volkswagen Beetle")
self.car.name = "VW sKäfer"
self.car.save()
def test_book_name_french(self):
self.assertEqual(self.car.name, "Volkswagen Beetle")
self.car.name = "Volkswagen Coccinelle"
self.car.save()
| SetupTestDataIsolationTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict9.py | {
"start": 148,
"end": 197
} | class ____(TypedDict):
inner_key: Inner1
| Inner2 |
python | google__pytype | pytype/tests/test_builtins3.py | {
"start": 254,
"end": 10535
} | class ____(test_base.BaseTest):
"""Tests for builtin methods and classes."""
def test_super_attribute(self):
ty = self.Infer("""
x = super.__name__
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: str
""",
)
def test_slice(self):
ty = self.Infer("""
x1 = [1,2,3][1:None]
x2 = [1,2,3][None:2]
x3 = [1,2,3][None:None]
x4 = [1,2,3][1:3:None]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
x1 = ... # type: List[int]
x2 = ... # type: List[int]
x3 = ... # type: List[int]
x4 = ... # type: List[int]
""",
)
def test_slice_attributes(self):
ty = self.Infer("""
v = slice(1)
start = v.start
stop = v.stop
step = v.step
indices = v.indices(0)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, Tuple
v = ... # type: slice
start = ... # type: Optional[int]
stop = ... # type: Optional[int]
step = ... # type: Optional[int]
indices = ... # type: Tuple[int, int, int]
""",
)
def test_next_function(self):
ty = self.Infer("""
a = next(iter([1, 2, 3]))
b = next(iter([1, 2, 3]), default = 4)
c = next(iter([1, 2, 3]), "hello")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
a = ... # type: int
b = ... # type: int
c = ... # type: Union[int, str]
""",
)
def test_implicit_typevar_import(self):
ty, _ = self.InferWithErrors(f"""
v = {abstract_utils.T} # name-error
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
v = ... # type: Any
""",
)
def test_explicit_typevar_import(self):
self.Check("""
from builtins import _T
_T
""")
def test_class_of_type(self):
ty = self.Infer("""
v = int.__class__
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
v = ... # type: Type[type]
""",
)
@test_base.skip("broken")
def test_clear(self):
ty = self.Infer("""
x = {1, 2}
x.clear()
y = {"foo": 1}
y.clear()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Set
x = ... # type: Set[nothing]
y = ... # type: Dict[nothing, nothing]
""",
)
def test_cmp(self):
ty = self.Infer("""
if not cmp(4, 4):
x = 42
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_repr(self):
ty = self.Infer("""
if repr("hello world"):
x = 42
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_int_init(self):
errors = self.CheckWithErrors("""
int()
int(0)
int("0")
int("0", 10)
int(u"0")
int(u"0", 10)
int(0, 1, 2) # wrong-arg-count[e]
""")
self.assertErrorRegexes(errors, {"e": r"1.*4"})
def test_newlines(self):
with test_utils.Tempdir() as d:
d.create_file(
"newlines.txt",
"""
1
2
3
""",
)
self.Check("""
l = []
with open("newlines.txt", "rU") as f:
for line in f:
l.append(line)
newlines = f.newlines
""")
def test_init_with_unicode(self):
self.Check("""
int(u"123.0")
float(u"123.0")
complex(u"123.0")
""")
def test_io_write(self):
self.Check("""
import sys
sys.stdout.write("hello world")
""")
def test_binary_io_write(self):
self.Check("""
with open('foo', 'wb') as f:
f.write(bytearray([1, 2, 3]))
""")
def test_hasattr_none(self):
self.assertNoCrash(self.Check, "hasattr(int, None)")
def test_number_attrs(self):
ty = self.Infer("""
a = (42).denominator
b = (42).numerator
c = (42).real
d = (42).imag
e = (3.14).conjugate()
f = (3.14).is_integer()
g = (3.14).real
h = (3.14).imag
i = (2j).conjugate()
j = (2j).real
k = (2j).imag
""")
self.assertTypesMatchPytd(
ty,
"""
a = ... # type: int
b = ... # type: int
c = ... # type: int
d = ... # type: int
e = ... # type: float
f = ... # type: bool
g = ... # type: float
h = ... # type: float
i = ... # type: complex
j = ... # type: float
k = ... # type: float
""",
)
def test_builtins(self):
# This module doesn't exist, on Python 2. However, it exists in typeshed, so
# make sure that we don't break (report pyi-error) when we import it.
self.Check("""
import builtins # pytype: disable=import-error
""")
def test_special_builtin_types(self):
self.InferWithErrors("""
isinstance(1, int)
isinstance(1, "no") # wrong-arg-types
issubclass(int, object)
issubclass(0, 0) # wrong-arg-types
issubclass(int, 0) # wrong-arg-types
hasattr(str, "upper")
hasattr(int, int) # wrong-arg-types
""")
def test_unpack_list(self):
ty = self.Infer("""
x = [1, ""]
a, b = x
x.append(2)
c, d, e = x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Union
x = ... # type: List[Union[int, str]]
a = ... # type: int
b = ... # type: str
c = ... # type: Union[int, str]
d = ... # type: Union[int, str]
e = ... # type: Union[int, str]
""",
)
def test_bytearray_setitem(self):
self.Check("""
ba = bytearray(b"hello")
ba[0] = 106
ba[4:] = [121, 102, 105, 115, 104]
ba[4:] = b"yfish"
ba[4:] = bytearray("yfish")
ba[:5] = b""
ba[1:2] = b"la"
ba[2:3:2] = b"u"
""")
def test_bytearray_setitem_py3(self):
self.Check("""
ba = bytearray(b"hello")
ba[0] = 106
ba[:1] = [106]
ba[:1] = b"j"
ba[:1] = bytearray(b"j")
ba[:1] = memoryview(b"j")
ba[4:] = b"yfish"
ba[0:5] = b""
ba[1:4:2] = b"at"
""")
def test_bytearray_contains(self):
self.Check("""
ba = bytearray(b"test")
1 in ba
"world" in ba
b"world" in ba
bytearray(b"t") in ba
""")
def test_from_hex(self):
ty = self.Infer("""
f = float.fromhex("feed")
b = bytearray.fromhex("beef")
""")
self.assertTypesMatchPytd(
ty,
"""
f = ... # type: float
b = ... # type: bytearray
""",
)
def test_none_length(self):
errors = self.CheckWithErrors("len(None) # wrong-arg-types[e]")
self.assertErrorRegexes(errors, {"e": r"Sized.*None"})
def test_sequence_length(self):
self.Check("""
len("")
len(u"")
len(bytearray())
len([])
len(())
len(frozenset())
len(range(0))
""")
def test_mapping_length(self):
self.Check("""
len({})
""")
def test_print_bare_type(self):
ty = self.Infer("""
from typing import Any, Dict, Type
d1 = {} # type: Dict[str, type]
d2 = {} # type: Dict[str, Type[Any]]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
d1 = ... # type: Dict[str, type]
d2 = ... # type: Dict[str, type]
""",
)
def test_get_function_attr(self):
self.Check("getattr(lambda: None, '__defaults__')")
def test_str_startswith(self):
self.Check("""
s = "some str"
s.startswith("s")
s.startswith(("s", "t"))
s.startswith("a", start=1, end=2)
""")
def test_str_endswith(self):
self.Check("""
s = "some str"
s.endswith("r")
s.endswith(("r", "t"))
s.endswith("a", start=1, end=2)
""")
def test_path(self):
with test_utils.Tempdir() as d:
d.create_file("foo/__init__.py")
self.Check(
"""
import foo
__path__, foo.__path__
""",
pythonpath=[d.path],
)
def test_del_byte_array_slice(self):
self.Check("""
ba = bytearray(b"hello")
del ba[0:2]
""")
def test_input(self):
self.Check("""
input()
input('input: ')
""")
def test_set_default_error(self):
ty, errors = self.InferWithErrors("""
x = {}
y = x.setdefault() # wrong-arg-count[e1]
z = x.setdefault(1, 2, 3, *[]) # wrong-arg-count[e2]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict
x = ... # type: Dict[nothing, nothing]
y = ... # type: Any
z = ... # type: Any
""",
)
self.assertErrorRegexes(errors, {"e1": r"2.*0", "e2": r"2.*3"})
def test_tuple(self):
ty = self.Infer("""
def f(x, y):
return y
def g():
args = (4, )
return f(3, *args)
g()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar
_T1 = TypeVar("_T1")
def f(x, y: _T1) -> _T1: ...
def g() -> int: ...
""",
)
def test_str_join_error(self):
errors = self.CheckWithErrors("', '.join([1, 2, 3]) # wrong-arg-types[e]")
self.assertErrorRegexes(
errors, {"e": r"Expected.*Iterable\[str\].*Actual.*list\[int\]"}
)
def test_int_protocols(self):
self.Check("""
class Foo:
def __int__(self):
return 0
class Bar:
def __trunc__(self):
return 0
int(Foo())
int(Bar())
""")
def test_bool_methods(self):
ty = self.Infer("""
x = True
print((not x) * (1,))
print((not x) * [1])
print((1,) * (not x))
print([1] * (not x))
a = True ** True
b = True ** 1.0
""")
self.assertTypesMatchPytd(
ty,
"""
a: int
b: float
x: bool
""",
)
def test_delattr(self):
self.Check("""
class Foo:
def __delattr__(self, name):
super(Foo, self).__delattr__(name)
""")
if __name__ == "__main__":
test_base.main()
| BuiltinTests3 |
python | python-jsonschema__jsonschema | jsonschema/cli.py | {
"start": 670,
"end": 723
} | class ____(Exception):
pass
@define
| _CannotLoadFile |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_overrides.py | {
"start": 6923,
"end": 13445
} | class ____:
"""Test the ToolCallRequest.override() method."""
def test_override_tool_call(self) -> None:
"""Test overriding tool_call dict."""
from langchain_core.tools import tool
@tool
def test_tool(x: int) -> str:
"""A test tool."""
return f"Result: {x}"
original_call = {"name": "test_tool", "args": {"x": 5}, "id": "1", "type": "tool_call"}
modified_call = {"name": "test_tool", "args": {"x": 10}, "id": "1", "type": "tool_call"}
original_request = ToolCallRequest(
tool_call=original_call,
tool=test_tool,
state={"messages": []},
runtime=None,
)
new_request = original_request.override(tool_call=modified_call)
# New request should have modified tool_call
assert new_request.tool_call["args"]["x"] == 10
# Original should be unchanged
assert original_request.tool_call["args"]["x"] == 5
# Other attributes should be the same
assert new_request.tool is original_request.tool
assert new_request.state is original_request.state
def test_override_state(self) -> None:
"""Test overriding state."""
from langchain_core.tools import tool
@tool
def test_tool(x: int) -> str:
"""A test tool."""
return f"Result: {x}"
tool_call = {"name": "test_tool", "args": {"x": 5}, "id": "1", "type": "tool_call"}
original_state = {"messages": [HumanMessage("Hi")]}
new_state = {"messages": [HumanMessage("Hi"), AIMessage("Hello")]}
original_request = ToolCallRequest(
tool_call=tool_call,
tool=test_tool,
state=original_state,
runtime=None,
)
new_request = original_request.override(state=new_state)
assert len(new_request.state["messages"]) == 2
assert len(original_request.state["messages"]) == 1
def test_override_multiple_attributes(self) -> None:
"""Test overriding multiple attributes at once."""
from langchain_core.tools import tool
@tool
def test_tool(x: int) -> str:
"""A test tool."""
return f"Result: {x}"
@tool
def another_tool(y: str) -> str:
"""Another test tool."""
return f"Output: {y}"
original_call = {"name": "test_tool", "args": {"x": 5}, "id": "1", "type": "tool_call"}
modified_call = {
"name": "another_tool",
"args": {"y": "hello"},
"id": "2",
"type": "tool_call",
}
original_request = ToolCallRequest(
tool_call=original_call,
tool=test_tool,
state={"count": 1},
runtime=None,
)
new_request = original_request.override(
tool_call=modified_call,
tool=another_tool,
state={"count": 2},
)
assert new_request.tool_call["name"] == "another_tool"
assert new_request.tool.name == "another_tool"
assert new_request.state == {"count": 2}
# Original unchanged
assert original_request.tool_call["name"] == "test_tool"
assert original_request.tool.name == "test_tool"
assert original_request.state == {"count": 1}
def test_override_with_copy_pattern(self) -> None:
"""Test common pattern of copying and modifying tool_call."""
from langchain_core.tools import tool
@tool
def test_tool(value: int) -> str:
"""A test tool."""
return f"Result: {value}"
original_call = {
"name": "test_tool",
"args": {"value": 5},
"id": "call_123",
"type": "tool_call",
}
original_request = ToolCallRequest(
tool_call=original_call,
tool=test_tool,
state={},
runtime=None,
)
# Common pattern: copy tool_call and modify args
modified_call = {**original_request.tool_call, "args": {"value": 10}}
new_request = original_request.override(tool_call=modified_call)
assert new_request.tool_call["args"]["value"] == 10
assert new_request.tool_call["id"] == "call_123"
assert new_request.tool_call["name"] == "test_tool"
# Original unchanged
assert original_request.tool_call["args"]["value"] == 5
def test_override_preserves_identity(self) -> None:
"""Test that unchanged attributes maintain object identity."""
from langchain_core.tools import tool
@tool
def test_tool(x: int) -> str:
"""A test tool."""
return f"Result: {x}"
tool_call = {"name": "test_tool", "args": {"x": 5}, "id": "1", "type": "tool_call"}
state = {"messages": []}
original_request = ToolCallRequest(
tool_call=tool_call,
tool=test_tool,
state=state,
runtime=None,
)
new_call = {"name": "test_tool", "args": {"x": 10}, "id": "1", "type": "tool_call"}
new_request = original_request.override(tool_call=new_call)
# Unchanged objects should be the same instance
assert new_request.tool is test_tool
assert new_request.state is state
def test_override_chaining(self) -> None:
"""Test chaining multiple override calls."""
from langchain_core.tools import tool
@tool
def test_tool(x: int) -> str:
"""A test tool."""
return f"Result: {x}"
tool_call = {"name": "test_tool", "args": {"x": 5}, "id": "1", "type": "tool_call"}
original_request = ToolCallRequest(
tool_call=tool_call,
tool=test_tool,
state={"count": 1},
runtime=None,
)
call_2 = {"name": "test_tool", "args": {"x": 10}, "id": "1", "type": "tool_call"}
call_3 = {"name": "test_tool", "args": {"x": 15}, "id": "1", "type": "tool_call"}
final_request = (
original_request.override(tool_call=call_2)
.override(state={"count": 2})
.override(tool_call=call_3)
)
assert final_request.tool_call["args"]["x"] == 15
assert final_request.state == {"count": 2}
# Original unchanged
assert original_request.tool_call["args"]["x"] == 5
assert original_request.state == {"count": 1}
| TestToolCallRequestOverride |
python | wandb__wandb | wandb/sdk/data_types/saved_model.py | {
"start": 1824,
"end": 10981
} | class ____(WBValue, Generic[SavedModelObjType]):
"""Internal W&B Artifact model storage.
_model_type_id: (str) The id of the SavedModel subclass used to serialize the model.
"""
_log_type: ClassVar[str]
_path_extension: ClassVar[str]
_model_obj: SavedModelObjType | None
_path: str | None
_input_obj_or_path: SavedModelObjType | str | pathlib.Path
# Public Methods
def __init__(
self, obj_or_path: SavedModelObjType | str | pathlib.Path, **kwargs: Any
) -> None:
super().__init__()
if self.__class__ == _SavedModel:
raise TypeError(
"Cannot instantiate abstract SavedModel class - please use SavedModel.init(...) instead."
)
self._model_obj = None
self._path = None
self._input_obj_or_path = obj_or_path
input_is_path = isinstance(obj_or_path, (str, pathlib.Path)) and os.path.exists(
obj_or_path
)
if input_is_path:
obj_or_path = str(obj_or_path)
self._set_obj(self._deserialize(obj_or_path))
else:
self._set_obj(obj_or_path)
self._copy_to_disk()
# At this point, the model will be saved to a temp path,
# and self._path will be set to such temp path. If the model
# provided was a path, then both self._path and self._model_obj
# are copies of the user-provided data. However, if the input
# was a model object, then we want to clear the model object. The first
# accessing of the model object (via .model_obj()) will load the model
# from the temp path.
if not input_is_path:
self._unset_obj()
@staticmethod
def init(obj_or_path: Any, **kwargs: Any) -> _SavedModel:
maybe_instance = _SavedModel._maybe_init(obj_or_path, **kwargs)
if maybe_instance is None:
raise ValueError(
f"No suitable SavedModel subclass constructor found for obj_or_path: {obj_or_path}"
)
return maybe_instance
@classmethod
def from_json(
cls: type[_SavedModel], json_obj: dict, source_artifact: Artifact
) -> _SavedModel:
path = json_obj["path"]
# First, if the entry is a file, the download it.
entry = source_artifact.manifest.entries.get(path)
if entry is not None:
dl_path = str(source_artifact.get_entry(path).download())
else:
# If not, assume it is directory.
# FUTURE: Add this functionality to the artifact loader
# (would be nice to parallelize)
dl_path = _load_dir_from_artifact(source_artifact, path)
# Return the SavedModel object instantiated with the downloaded path
# and specified adapter.
return cls(dl_path)
def to_json(self, run_or_artifact: wandb.Run | Artifact) -> dict:
# Unlike other data types, we do not allow adding to a Run directly. There is a
# bit of tech debt in the other data types which requires the input to `to_json`
# to accept a Run or Artifact. However, Run additions should be deprecated in the future.
# This check helps ensure we do not add to the debt.
if isinstance(run_or_artifact, wandb.Run):
raise TypeError("SavedModel cannot be added to run - must use artifact")
artifact = run_or_artifact
json_obj = {
"type": self._log_type,
}
assert self._path is not None, "Cannot add SavedModel to Artifact without path"
if os.path.isfile(self._path):
# If the path is a file, then we can just add it to the artifact,
# First checking to see if the artifact already has the file (use the cache)
# Else, add it directly, allowing the artifact adder to rename the file deterministically.
already_added_path = artifact.get_added_local_path_name(self._path)
if already_added_path is not None:
json_obj["path"] = already_added_path
else:
target_path = os.path.join(
".wb_data", "saved_models", os.path.basename(self._path)
)
json_obj["path"] = artifact.add_file(self._path, target_path, True).path
elif os.path.isdir(self._path):
# If the path is a directory, then we need to add all of the files
# The directory must be named deterministically based on the contents of the directory,
# but the files themselves need to have their name preserved.
# FUTURE: Add this functionality to the artifact adder itself
json_obj["path"] = _add_deterministic_dir_to_artifact(
artifact, self._path, os.path.join(".wb_data", "saved_models")
)
else:
raise ValueError(
f"Expected a path to a file or directory, got {self._path}"
)
return json_obj
def model_obj(self) -> SavedModelObjType:
"""Return the model object."""
if self._model_obj is None:
assert self._path is not None, "Cannot load model object without path"
self._set_obj(self._deserialize(self._path))
assert self._model_obj is not None, "Model object is None"
return self._model_obj
# Methods to be implemented by subclasses
@staticmethod
def _deserialize(path: str) -> SavedModelObjType:
"""Return the model object from a path. Allowed to throw errors."""
raise NotImplementedError
@staticmethod
def _validate_obj(obj: Any) -> bool:
"""Validate the model object. Allowed to throw errors."""
raise NotImplementedError
@staticmethod
def _serialize(obj: SavedModelObjType, dir_or_file_path: str) -> None:
"""Save the model to disk.
The method will receive a directory path which all files needed for
deserialization should be saved. A directory will always be passed if
_path_extension is an empty string, else a single file will be passed. Allowed
to throw errors.
"""
raise NotImplementedError
# Private Class Methods
@classmethod
def _maybe_init(
cls: type[_SavedModel], obj_or_path: Any, **kwargs: Any
) -> _SavedModel | None:
# _maybe_init is an exception-safe method that will return an instance of this class
# (or any subclass of this class - recursively) OR None if no subclass constructor is found.
# We first try the current class, then recursively call this method on children classes. This pattern
# conforms to the new "Weave-type" pattern developed by Shawn. This way, we can for example have a
# pytorch subclass that can itself have two subclasses: one for a TorchScript model, and one for a PyTorch model.
# The children subclasses will know how to serialize/deserialize their respective payloads, but the pytorch
# parent class can know how to execute inference on the model - regardless of serialization strategy.
try:
return cls(obj_or_path, **kwargs)
except Exception as e:
if DEBUG_MODE:
print(f"{cls}._maybe_init({obj_or_path}) failed: {e}") # noqa: T201
for child_cls in cls.__subclasses__():
maybe_instance = child_cls._maybe_init(obj_or_path, **kwargs)
if maybe_instance is not None:
return maybe_instance
return None
@classmethod
def _tmp_path(cls: type[_SavedModel]) -> str:
# Generates a tmp path under our MEDIA_TMP directory which confirms to the file
# or folder preferences of the class.
assert isinstance(cls._path_extension, str), "_path_extension must be a string"
tmp_path = os.path.abspath(os.path.join(MEDIA_TMP.name, runid.generate_id()))
if cls._path_extension != "":
tmp_path += "." + cls._path_extension
return tmp_path
# Private Instance Methods
def _copy_to_disk(self) -> None:
# Creates a temporary path and writes a fresh copy of the
# model to disk - updating the _path appropriately.
tmp_path = self._tmp_path()
self._dump(tmp_path)
self._path = tmp_path
def _unset_obj(self) -> None:
assert self._path is not None, "Cannot unset object if path is None"
self._model_obj = None
def _set_obj(self, model_obj: Any) -> None:
assert model_obj is not None and self._validate_obj(model_obj), (
f"Invalid model object {model_obj}"
)
self._model_obj = model_obj
def _dump(self, target_path: str) -> None:
assert self._model_obj is not None, "Cannot dump if model object is None"
self._serialize(self._model_obj, target_path)
def _get_cloudpickle() -> ModuleType:
return cast(
ModuleType,
util.get_module("cloudpickle", "ModelAdapter requires `cloudpickle`"),
)
# TODO: Add pip deps
# TODO: potentially move this up to the saved model class
PicklingSavedModelObjType = TypeVar("PicklingSavedModelObjType")
| _SavedModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/tuple7.py | {
"start": 651,
"end": 1071
} | class ____(tuple[_T, ...]):
def __new__(cls) -> Self: ...
objB = ClassB[complex]()
(x, y, z) = objB
reveal_type(x, expected_text="complex")
reveal_type(y, expected_text="complex")
reveal_type(z, expected_text="complex")
xx2: complex = objB[0]
yy2: complex = objB[1]
zz2: complex = objB[2]
def func1(lst: list[str] | None) -> None:
for item in lst or ():
reveal_type(item, expected_text="str")
| ClassB |
python | huggingface__transformers | tests/models/lxmert/test_modeling_lxmert.py | {
"start": 28346,
"end": 29441
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
input_ids = torch.tensor([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])
num_visual_features = 10
_, visual_feats = np.random.seed(0), np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
_, visual_pos = np.random.seed(0), np.random.rand(1, num_visual_features, 4)
visual_feats = torch.as_tensor(visual_feats, dtype=torch.float32)
visual_pos = torch.as_tensor(visual_pos, dtype=torch.float32)
output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
expected_shape = torch.Size([1, 11, 768])
self.assertEqual(expected_shape, output.shape)
expected_slice = torch.tensor(
[[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| LxmertModelIntegrationTest |
python | numba__numba | numba/core/types/containers.py | {
"start": 16230,
"end": 16547
} | class ____(Type):
"""
Internal type class for the entries of a Set's hash table.
"""
def __init__(self, set_type):
self.set_type = set_type
name = "entry(%s)" % set_type
super(SetEntry, self).__init__(name)
@property
def key(self):
return self.set_type
| SetEntry |
python | agronholm__apscheduler | src/apscheduler/triggers/combining.py | {
"start": 4353,
"end": 5726
} | class ____(BaseCombiningTrigger):
"""
Fires on every fire time of every trigger in chronological order.
If two or more triggers produce the same fire time, it will only be used once.
This trigger will be finished when none of the enclosed triggers can produce any new
fire times.
:param triggers: triggers to combine
"""
def next(self) -> datetime | None:
# Fill out the fire times on the first run
if not self._next_fire_times:
self._next_fire_times = [t.next() for t in self.triggers]
# Find out the earliest of the fire times
earliest_time: datetime | None = min(
(fire_time for fire_time in self._next_fire_times if fire_time is not None),
default=None,
)
if earliest_time is not None:
# Generate new fire times for the trigger(s) that generated the earliest
# fire time
for i, fire_time in enumerate(self._next_fire_times):
if fire_time == earliest_time:
self._next_fire_times[i] = self.triggers[i].next()
return earliest_time
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
super().__setstate__(state)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.triggers})"
| OrTrigger |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE794.py | {
"start": 407,
"end": 561
} | class ____(BaseModel):
@property
def buzz(self) -> str:
...
@buzz.setter
def buzz(self, value: str | int) -> None:
...
| User |
python | google__python-fire | fire/test_components.py | {
"start": 4714,
"end": 4889
} | class ____:
def __init__(self, arg1, arg2):
self.arg1 = arg1
self.arg2 = arg2
def run(self, arg1, arg2):
return (self.arg1, self.arg2, arg1, arg2)
| InstanceVars |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/known_directives.py | {
"start": 146,
"end": 3434
} | class ____(ValidationRule):
def enter_Directive(self, node, key, parent, path, ancestors):
directive_def = next((
definition for definition in self.context.get_schema().get_directives()
if definition.name == node.name.value
), None)
if not directive_def:
return self.context.report_error(GraphQLError(
self.unknown_directive_message(node.name.value),
[node]
))
candidate_location = get_directive_location_for_ast_path(ancestors)
if not candidate_location:
self.context.report_error(GraphQLError(
self.misplaced_directive_message(node.name.value, node.type),
[node]
))
elif candidate_location not in directive_def.locations:
self.context.report_error(GraphQLError(
self.misplaced_directive_message(node.name.value, candidate_location),
[node]
))
@staticmethod
def unknown_directive_message(directive_name):
return 'Unknown directive "{}".'.format(directive_name)
@staticmethod
def misplaced_directive_message(directive_name, location):
return 'Directive "{}" may not be used on "{}".'.format(directive_name, location)
_operation_definition_map = {
'query': DirectiveLocation.QUERY,
'mutation': DirectiveLocation.MUTATION,
'subscription': DirectiveLocation.SUBSCRIPTION,
}
def get_directive_location_for_ast_path(ancestors):
applied_to = ancestors[-1]
if isinstance(applied_to, ast.OperationDefinition):
return _operation_definition_map.get(applied_to.operation)
elif isinstance(applied_to, ast.Field):
return DirectiveLocation.FIELD
elif isinstance(applied_to, ast.FragmentSpread):
return DirectiveLocation.FRAGMENT_SPREAD
elif isinstance(applied_to, ast.InlineFragment):
return DirectiveLocation.INLINE_FRAGMENT
elif isinstance(applied_to, ast.FragmentDefinition):
return DirectiveLocation.FRAGMENT_DEFINITION
elif isinstance(applied_to, ast.SchemaDefinition):
return DirectiveLocation.SCHEMA
elif isinstance(applied_to, ast.ScalarTypeDefinition):
return DirectiveLocation.SCALAR
elif isinstance(applied_to, ast.ObjectTypeDefinition):
return DirectiveLocation.OBJECT
elif isinstance(applied_to, ast.FieldDefinition):
return DirectiveLocation.FIELD_DEFINITION
elif isinstance(applied_to, ast.InterfaceTypeDefinition):
return DirectiveLocation.INTERFACE
elif isinstance(applied_to, ast.UnionTypeDefinition):
return DirectiveLocation.UNION
elif isinstance(applied_to, ast.EnumTypeDefinition):
return DirectiveLocation.ENUM
elif isinstance(applied_to, ast.EnumValueDefinition):
return DirectiveLocation.ENUM_VALUE
elif isinstance(applied_to, ast.InputObjectTypeDefinition):
return DirectiveLocation.INPUT_OBJECT
elif isinstance(applied_to, ast.InputValueDefinition):
parent_node = ancestors[-3]
return (DirectiveLocation.INPUT_FIELD_DEFINITION
if isinstance(parent_node, ast.InputObjectTypeDefinition)
else DirectiveLocation.ARGUMENT_DEFINITION)
| KnownDirectives |
python | pytorch__pytorch | torch/ao/quantization/utils.py | {
"start": 1728,
"end": 30101
} | class ____:
"""A node pattern that matches all nodes, used in defining
fusion patterns in FX Graph Mode Quantization
"""
module_type_list = {
torch.nn.ReLU,
torch.nn.ReLU6,
torch.nn.AdaptiveAvgPool1d,
torch.nn.AdaptiveAvgPool2d,
torch.nn.AdaptiveAvgPool3d,
torch.nn.AvgPool1d,
torch.nn.AvgPool2d,
torch.nn.AvgPool3d,
torch.nn.MaxPool1d,
torch.nn.MaxPool2d,
torch.nn.MaxPool3d,
torch.nn.Identity,
torch.nn.Hardsigmoid,
torch.nn.Sigmoid,
torch.nn.Tanh,
}
func_list = {
torch.nn.functional.adaptive_avg_pool1d,
torch.nn.functional.adaptive_avg_pool2d,
torch.nn.functional.adaptive_avg_pool3d,
torch.nn.functional.elu,
torch.nn.functional.hardswish,
torch.nn.functional.instance_norm,
torch.nn.functional.layer_norm,
torch.nn.functional.leaky_relu,
torch.nn.functional.silu,
torch.nn.functional.mish,
torch.nn.functional.dropout,
torch.nn.functional.max_pool1d,
torch.nn.functional.max_pool2d,
torch.nn.functional.max_pool3d,
torch.nn.functional.relu,
torch.nn.functional.hardtanh,
torch.nn.functional.hardtanh_,
torch.nn.functional.hardsigmoid,
torch.nn.functional.sigmoid,
torch.transpose,
torch.repeat_interleave,
torch.sigmoid,
torch.squeeze,
torch.stack,
torch.sum,
torch.tanh,
torch.unsqueeze,
torch.cat,
}
method_list = {
torch.mean,
"relu",
"relu_",
"contiguous",
"detach",
"detach_",
"hardsigmoid",
"hardsigmoid_",
"permute",
"repeat",
"repeat_interleave",
"reshape",
"resize_",
"shape",
"sigmoid",
"sigmoid_",
"size",
"squeeze",
"squeeze_",
"tanh",
"tanh_",
"transpose",
"unsqueeze",
"unsqueeze_",
"view",
}
# TODO: not used now, remove
def check_node(node, modules):
# TODO: reuse is_fixed_qparam_node after we move this function to _lower_to_native_backend.py
is_call_function = node.op == "call_function" and node.target in func_list
is_call_method = node.op == "call_method" and node.target in method_list
is_call_module = (
node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
)
return is_call_function, is_call_method, is_call_module
def get_combined_dict(default_dict, additional_dict):
"""
Combines two dictionaries.
This function takes two dictionaries as input and returns a new dictionary
that contains all the key-value pairs from both input dictionaries.
If there are any duplicate keys in the `additional_dict`, the values
from the `additional_dict` will overwrite those in the `default_dict`.
Args:
default_dict (dict): The main dictionary that will be used as the base
additional_dict (dict): The dictionary used to update `default_dict`
Returns:
dict: The resulting dictionary
Example:
>>> x = dict(a=1, b=1)
>>> y = dict(b=2, c=3)
>>> get_combined_dict(x, y)
{'a': 1, 'b': 2, 'c': 3}
"""
d = default_dict.copy()
d.update(additional_dict)
return d
def is_per_tensor(qscheme):
return qscheme == torch.per_tensor_affine or qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
return qscheme in [
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric,
]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
"""
Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz.
"""
return functools.reduce(getattr, fqn.split("."), obj)
def to_underlying_dtype(qdtype):
DTYPE_MAPPING = {
torch.quint8: torch.uint8,
torch.qint8: torch.int8,
torch.qint32: torch.int32,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.uint8: torch.uint8,
torch.int8: torch.int8,
torch.uint16: torch.uint16,
torch.int16: torch.int16,
torch.int32: torch.int32,
torch.float8_e5m2: torch.float8_e5m2,
torch.float8_e4m3fn: torch.float8_e4m3fn,
}
if qdtype not in DTYPE_MAPPING:
raise AssertionError("Unsupported dtype: " + str(qdtype))
return DTYPE_MAPPING[qdtype]
def get_qparam_dict(observer_or_fake_quant):
from torch.ao.quantization.observer import PlaceholderObserver
qscheme = getattr(observer_or_fake_quant, "qscheme", None)
dtype = observer_or_fake_quant.dtype
qparams = {"qscheme": qscheme, "dtype": dtype}
if not qscheme or isinstance(observer_or_fake_quant, PlaceholderObserver):
return {"qscheme": None, "dtype": dtype}
if is_per_tensor(qscheme):
qscheme = torch.per_tensor_affine
elif is_per_channel(qscheme):
# change symmetric to affine since we do not have symmetric
# quantized Tensor
if qscheme == torch.per_channel_symmetric:
qscheme = torch.per_channel_affine
qparams["axis"] = observer_or_fake_quant.ch_axis
else:
raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
# update qscheme, since we don't have symmetric quant qscheme
# in quantized Tensor
qparams["qscheme"] = qscheme
scale, zero_point = observer_or_fake_quant.calculate_qparams()
qparams["scale"] = scale
qparams["zero_point"] = zero_point
if hasattr(observer_or_fake_quant, "quant_min"):
qparams["quant_min"] = observer_or_fake_quant.quant_min
if hasattr(observer_or_fake_quant, "quant_max"):
qparams["quant_max"] = observer_or_fake_quant.quant_max
return qparams
def get_swapped_custom_module_class(
custom_module, custom_module_class_mapping, qconfig
):
"""Get the observed/quantized custom module class that we need
to swap `custom_module` to
Input:
custom_module: input, can be an instance of either a float or observed custom module
custom_module_class_mapping: the float to observed or observed to quantized custom module class mapping
qconfig: qconfig configured for the custom module
Output:
corresponding observed/quantized custom module class for input custom module instance
"""
quant_type = get_quant_type(qconfig)
class_mapping = custom_module_class_mapping.get(quant_type, {})
if type(custom_module) not in class_mapping:
raise AssertionError(
"did not find corresponding observed "
f"module class for {type(custom_module)} in mapping: {class_mapping}"
)
return class_mapping[type(custom_module)]
def activation_dtype(qconfig):
if qconfig is None:
raise AssertionError("qconfig must be provided to determine activation dtype")
activation = qconfig.activation()
return activation.dtype
def weight_dtype(qconfig):
if qconfig is None:
raise AssertionError("qconfig must be provided to determine weight dtype")
weight = qconfig.weight()
return weight.dtype
def activation_is_statically_quantized(qconfig):
"""Given a qconfig, decide if the activation needs to be
quantized or not, this includes quantizing to quint8, qint8 and qint32 and float16
"""
return activation_dtype(qconfig) in [
torch.quint8,
torch.qint8,
torch.qint32,
torch.float16,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.float8_e5m2,
torch.float8_e4m3fn,
] and (not activation_is_dynamically_quantized(qconfig))
def activation_is_dynamically_quantized(qconfig):
"""Given a qconfig, decide if the activation needs to be
dynamically quantized or not, this includes dynamically quantizing to
quint8, qint8 and float16
"""
_activation_dtype, _, activation_is_dynamic = get_qconfig_dtypes(qconfig)
return activation_is_dynamic
def activation_is_int8_quantized(qconfig):
"""Given a qconfig, decide if the activation needs to be
quantized to int8 or not, this includes quantizing to quint8, qint8
"""
return activation_dtype(qconfig) in [
torch.quint8,
torch.qint8,
torch.uint8,
torch.int8,
]
def activation_is_int32_quantized(qconfig):
"""Given a qconfig, decide if the activation needs to be
quantized to int32 or not
"""
return activation_dtype(qconfig) in [torch.qint32, torch.int32]
def weight_is_quantized(qconfig):
"""Given a qconfig, decide if the weight needs to be
quantized or not
"""
return weight_dtype(qconfig) in [
torch.quint8,
torch.qint8,
torch.float16,
torch.quint4x2,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.float8_e5m2,
torch.float8_e4m3fn,
]
def weight_is_statically_quantized(qconfig):
"""Given a qconfig, decide if the weight needs to be statically
quantized or not
"""
return weight_dtype(qconfig) in [torch.quint8, torch.qint8, torch.uint8, torch.int8]
def op_is_int8_dynamically_quantized(qconfig) -> bool:
"""Given a qconfig, returns True if this op is using int8 dynamic
quantization
"""
activation_dtype, weight_dtype, activation_is_dynamic = get_qconfig_dtypes(qconfig)
return (
activation_dtype in [torch.quint8, torch.uint8]
and
# for now, the lines below assume fbgemm or qnnpack
weight_dtype in [torch.qint8, torch.int8]
and activation_is_dynamic
)
def get_qconfig_dtypes(qconfig):
r"""returns the qconfig tuple for qconfig:
(activation_dtype, weight_dtype, activation_is_dynamic)
"""
if qconfig is None:
raise AssertionError("qconfig must be provided to extract dtypes")
activation = qconfig.activation()
weight = qconfig.weight()
act_is_dynamic = getattr(activation, "is_dynamic", False)
return (activation.dtype, weight.dtype, act_is_dynamic)
def get_quant_type(qconfig):
if qconfig is None:
raise AssertionError("qconfig must be provided to determine quant type")
activation = qconfig.activation()
weight = qconfig.weight()
static_dtypes = [
torch.quint8,
torch.qint8,
torch.quint4x2,
torch.qint32,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.float8_e5m2,
torch.float8_e4m3fn,
]
if weight.dtype in static_dtypes:
if hasattr(activation, "is_dynamic") and activation.is_dynamic:
return QuantType.DYNAMIC
elif activation.dtype in static_dtypes:
return QuantType.STATIC
else:
return QuantType.WEIGHT_ONLY
if weight.dtype == torch.float16:
if hasattr(activation, "is_dynamic") and activation.is_dynamic:
return QuantType.DYNAMIC
elif activation.dtype == torch.float16:
return QuantType.STATIC
raise Exception( # noqa: TRY002
f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype}),"
f"weight({weight.dtype})"
)
def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
"""Checks if the given minimum and maximum values are valid, meaning that
they exist and the min value is less than the max value.
"""
if min_val.numel() == 0 or max_val.numel() == 0:
warnings.warn(
"must run observer before calling calculate_qparams. "
+ "Returning default values.",
stacklevel=2,
)
return False
if min_val.dim() == 0 or max_val.dim() == 0:
if min_val == float("inf") and max_val == float("-inf"):
warnings.warn(
"must run observer before calling calculate_qparams. "
+ "Returning default values.",
stacklevel=2,
)
return False
if min_val > max_val:
raise AssertionError(f"min {min_val} should be less than max {max_val}")
else:
if torch.any(min_val > max_val):
raise AssertionError(f"min {min_val} should be less than max {max_val}")
return True
def calculate_qmin_qmax(
quant_min: int,
quant_max: int,
has_customized_qrange: bool,
dtype: torch.dtype,
reduce_range: bool,
) -> tuple[int, int]:
r"""Calculates actual qmin and qmax based on the quantization range,
observer datatype and if range is reduced.
"""
# TODO(jerryzh): Figure out why custom quant_min/quant_max are still adjusted.
if has_customized_qrange:
# This initialization here is to be resolve TorchScript compilation issues and allow
# using of refinement to decouple initial_qmin and initial_qmax from quantization range.
# The actual values of initial_qmin and initial_qmax will be reset below.
if dtype in [torch.qint32, torch.int32]:
initial_quant_min, initial_quant_max = 0, 2**32 - 1
else:
initial_quant_min, initial_quant_max = 0, 255
# The following assignment of self.qmin and self.qmax to the local variables and the if check refine the
# attribute from Optional valid integers for use, based on TorchScript's requirements.
custom_quant_min, custom_quant_max = quant_min, quant_max
if custom_quant_min is not None and custom_quant_max is not None:
initial_quant_min, initial_quant_max = (
custom_quant_min,
custom_quant_max,
)
qrange_len = initial_quant_max - initial_quant_min + 1
if dtype in [torch.qint8, torch.int8]:
if not (0 < qrange_len <= 256):
raise AssertionError(
"quantization range should be positive and not exceed the maximum bit range (=256)."
)
elif dtype in [torch.qint32, torch.int32]:
if not (0 < qrange_len <= 2**32):
raise AssertionError(
"quantization range should be positive and not exceed the maximum bit range (=4294967296)."
)
if reduce_range:
quant_min, quant_max = quant_min // 2, quant_max // 2
else:
# Fallback onto default 8-bit qmin and qmax calculation if dynamic range is not used.
if dtype in [torch.qint8, torch.int8]:
if reduce_range:
quant_min, quant_max = -64, 63
else:
quant_min, quant_max = -128, 127
elif dtype in [torch.quint8, torch.uint8]:
if reduce_range:
quant_min, quant_max = 0, 127
else:
quant_min, quant_max = 0, 255
elif dtype in [torch.qint32, torch.int32]:
quant_min, quant_max = -1 * (2**31), (2**31) - 1
elif dtype == torch.uint16:
quant_min, quant_max = 0, 2**16 - 1
elif dtype == torch.int16:
quant_min, quant_max = -(2**15), 2**15 - 1
else:
quant_min, quant_max = 0, 15
return quant_min, quant_max
def _parent_name(target):
"""
Turn 'foo.bar' into ['foo', 'bar']
"""
r = target.rsplit(".", 1)
if len(r) == 1:
return "", r[0]
else:
return r[0], r[1]
def has_no_children_ignoring_parametrizations(module):
"""
Checks if module._modules is empty or
if module is a parametrization, checks that module._modules only has
the 'parametrizations' module
"""
if len(module._modules) == 0:
return True
elif is_parametrized(module):
return len(module._modules) == 1 and "parametrizations" in module._modules
else:
return False
def _get_path_of_module(
root: torch.nn.Module, submodule: torch.nn.Module
) -> str | None:
"""Get the path (fully qualified name) of a submodule
Example::
>> class M(torch.nn.Module):
def __init__(self) -> None:
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
>> m = M()
>> l = m.linear
>> _get_path_of_module(m, l)
"linear"
"""
for n, p in root.named_modules():
if submodule is p:
return n
return None
def _get_signature_locals(f: Callable, loc: dict[str, Any]) -> dict[str, Any]:
"""Get local keyword arguments
Example::
>> def f(self, a, b=9):
pass
>> loc = {"a": 6, "c": 7}
>> _get_signature_locals(f, loc)
{"a": 6}
"""
return {k: v for k, v in loc.items() if k in signature(f).parameters}
def _get_default_kwargs(f: Callable) -> "OrderedDict[str, Any]":
"""Get all default keyword arguments from function signature
Example::
>> def f(self, a, b=9):
pass
>> _get_default_kwargs(f)
{"b": 9}
"""
kwargs = {}
for name, param in signature(f).parameters.items():
if param.default is not param.empty:
kwargs[name] = param.default
elif param.kind is param.VAR_POSITIONAL:
kwargs[name] = ()
elif param.kind is param.VAR_KEYWORD:
kwargs[name] = {}
return OrderedDict(kwargs)
def _normalize_kwargs(func: Callable, loc: dict[str, Any]) -> "OrderedDict[str, Any]":
"""Given a function and local function arguments, normalize the keyword
arguments by filling in default arguments from function signature
Example::
>> def f(self, key1=3, key2=3):
pass
>> loc = {"key2": 6}
>> _normalize_kwargs(f, loc)
{"key1": 3, "key2": 6}
"""
default_kwargs = _get_default_kwargs(func)
local_kwargs = _get_signature_locals(func, loc)
normalized_kwargs = default_kwargs.copy()
for attr, val in local_kwargs.items():
if attr in normalized_kwargs:
# override the default keyword arguments
normalized_kwargs[attr] = val
return normalized_kwargs
def validate_qmin_qmax(quant_min: int, quant_max: int) -> None:
r"""Validates that the user-specified quantization range is properly initialized
and within the given bound supported by the observer dtype.
To accommodate lower-bit quantization with respect to the existing torch.qint8 and
torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax
values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
fake quantization. These estimates are compared against parameters learned through backpropagation.
The related literatures for scale and zero point via backpropagation are as follows:
Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
"""
# The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted
# based on whether quantization range is reduced and the datatype (signed/unsigned) used by the observer.
if not (quant_min <= 0 <= quant_max):
raise AssertionError("Used-specified quantization range must include 0.")
if quant_min >= quant_max:
raise AssertionError(
"qmin must be strictly less than qmax for user-specified quantization range."
)
# Functionally equivalent to '_calculate_qparams' in observer.py. Observers must be torchscriptable however and qscheme
# as far as I can tell is not allowed to passed as a parameter in torchscript functions. This makes refactoring observer
# to use this utility a massive pain and very gross. For now Im opting just to duplicate as this code seems unlikely to change
# (last update over 1 year ago) and when torchscript is fully deprecated we can refactor. TODO(jakeszwe, jerryzh168)
def determine_qparams(
min_val: torch.Tensor,
max_val: torch.Tensor,
quant_min: int,
quant_max: int,
dtype: torch.dtype,
eps: torch.Tensor,
has_customized_qrange: bool,
qscheme: torch.qscheme = torch.per_tensor_affine,
) -> tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
if not check_min_max_valid(min_val, max_val):
return torch.tensor([1.0], device=min_val.device.type), torch.tensor(
[0], device=min_val.device.type
)
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
device = min_val_neg.device
scale = torch.ones(min_val_neg.size(), dtype=torch.double, device=device)
zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
eps = eps.to(device)
if qscheme == torch.per_tensor_symmetric or qscheme == torch.per_channel_symmetric:
max_val_pos = torch.max(-min_val_neg, max_val_pos)
scale = max_val_pos / (float(quant_max - quant_min) / 2)
scale = torch.max(scale, eps)
if dtype in [torch.uint8, torch.quint8]:
if has_customized_qrange:
# When customized quantization range is used, down-rounded midpoint of the range is chosen.
zero_point = zero_point.new_full(
zero_point.size(), (quant_min + quant_max) // 2
)
else:
zero_point = zero_point.new_full(zero_point.size(), 128)
elif qscheme == torch.per_channel_affine_float_qparams:
scale = (max_val - min_val) / float(quant_max - quant_min)
scale = torch.where(scale > eps, scale, torch.ones_like(scale))
# We use the quantize function
# xq = Round(Xf * inv_scale + zero_point),
# setting zero_point to (-1 * min *inv_scale) we get
# Xq = Round((Xf - min) * inv_scale)
zero_point = -1 * min_val / scale
else:
scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
scale = torch.max(scale, eps)
zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
zero_point = torch.clamp(zero_point, quant_min, quant_max)
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor(
[int(zero_point)], dtype=zero_point.dtype, device=device
)
if qscheme == torch.per_channel_affine_float_qparams:
zero_point = torch.tensor(
[float(zero_point)], dtype=zero_point.dtype, device=device
)
return scale.to(torch.double), zero_point.to(torch.int64)
def _get_num_pos_args(f: Callable) -> int:
"""Get number of positional args for a function
Example::
>> def f(self, key1=3, key2=3):
pass
>> _get_num_pos_args(f)
3
"""
return len(getfullargspec(f).args)
def get_fqn_to_example_inputs(
model: torch.nn.Module, example_inputs: tuple[Any, ...]
) -> dict[str, tuple[Any, ...]]:
"""Given a model and its example inputs, return a dictionary from
fully qualified name of submodules to example_inputs for that submodule,
e.g. {"linear1": (tensor1,), "linear2": (tensor2,), "sub": (tensor3,),
"sub.linear1": (tensor4,), ...}
Used to make quantizing submodules easier now that FX Graph Mode Quantization requires
example inputs.
Also works for keyword arguments with default values, we would flatten keyword
arguments as positional arguments and fill in the missing keyword args with default
values, e.g. if we have a forward function:
def forward(self, x, key1=3, key2=3):
...
and we call it with self.submodule(x, key2=6)
we'll get example_inputs: (x, 3, 6)
user can also override `key1` with positional arguments as well:
for self.submodule(x, 5, key2=6)
we'll get: (x, 5, 6)
variable positional arguments and variable positional keyword arguments in forward
function are not supported currently, so please make sure no submodules is using
them.
"""
root = model
fqn_to_example_inputs = {}
def _patched_module_call(self, *args, **kwargs):
submodule_example_inputs = list(args).copy()
normalized_kwargs = _normalize_kwargs(self.forward, kwargs)
# minus 1 to skipping counting `self`
num_args = _get_num_pos_args(self.forward) - 1
num_to_pop = num_args - len(submodule_example_inputs)
while num_to_pop and normalized_kwargs:
normalized_kwargs.popitem(last=False)
num_to_pop -= 1
submodule_example_inputs.extend(normalized_kwargs.values())
submodule_example_inputs_tuple = tuple(submodule_example_inputs)
fqn = _get_path_of_module(root, self)
if fqn is not None:
fqn_to_example_inputs[fqn] = submodule_example_inputs_tuple
return orig_module_call(self, *args, **kwargs)
orig_module_call = torch.nn.Module.__call__
torch.nn.Module.__call__ = _patched_module_call # type: ignore[method-assign]
try:
model(*example_inputs)
finally:
# restore the module call even if there is an exception
torch.nn.Module.__call__ = orig_module_call # type: ignore[method-assign]
return fqn_to_example_inputs
def _assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
devices = {p.device for p in module.parameters()} | {
p.device for p in module.buffers()
}
"""
As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
"""
if {torch.device("cpu"), torch.device("meta")} == devices:
warnings.warn(
"Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.",
stacklevel=2,
)
devices = {torch.device("cpu")}
""
if len(devices) > 1:
raise AssertionError(
"prepare only works with cpu or single-device CUDA modules, "
f"but got devices {devices}"
)
device = next(iter(devices)) if len(devices) > 0 else None
return device
DEPRECATION_WARNING = (
"torch.ao.quantization is deprecated and will be removed in 2.10. \n"
"For migrations of users: \n"
"1. Eager mode quantization (torch.ao.quantization.quantize, "
"torch.ao.quantization.quantize_dynamic), please migrate to use torchao eager mode "
"quantize_ API instead \n"
"2. FX graph mode quantization (torch.ao.quantization.quantize_fx.prepare_fx,"
"torch.ao.quantization.quantize_fx.convert_fx, please migrate to use torchao pt2e quantization "
"API instead (prepare_pt2e, convert_pt2e) \n"
"3. pt2e quantization has been migrated to torchao (https://github.com/pytorch/ao/tree/main/torchao/quantization/pt2e) \n"
"see https://github.com/pytorch/ao/issues/2259 for more details"
)
__all__ = [
"NodePattern",
"Pattern",
"MatchAllNode",
"check_node",
"get_combined_dict",
"is_per_tensor",
"is_per_channel",
"getattr_from_fqn",
"get_qparam_dict",
"get_swapped_custom_module_class",
"activation_dtype",
"weight_dtype",
"activation_is_statically_quantized",
"activation_is_dynamically_quantized",
"activation_is_int8_quantized",
"activation_is_int32_quantized",
"weight_is_quantized",
"weight_is_statically_quantized",
"op_is_int8_dynamically_quantized",
"get_qconfig_dtypes",
"get_quant_type",
"check_min_max_valid",
"calculate_qmin_qmax",
"has_no_children_ignoring_parametrizations",
"get_fqn_to_example_inputs",
"to_underlying_dtype",
"determine_qparams",
"validate_qmin_qmax",
"DEPRECATION_WARNING",
]
| MatchAllNode |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/issues.py | {
"start": 1509,
"end": 10506
} | class ____(MSTeamsMessageBuilder):
def __init__(
self,
group: Group,
event: Event | GroupEvent,
rules: Sequence[Rule],
integration: RpcIntegration,
):
self.group = group
self.event = event
self.rules = rules
self.integration = integration
def generate_action_payload(self, action_type: ACTION_TYPE) -> Any:
# we need nested data or else Teams won't handle the payload correctly
assert self.event.group is not None
return {
"payload": {
"actionType": action_type,
"groupId": self.event.group.id,
"eventId": self.event.event_id,
"rules": [rule.id for rule in self.rules],
"integrationId": self.integration.id,
}
}
def build_group_title(self, notification_uuid: str | None = None) -> TextBlock:
text = build_attachment_title(self.group)
params = {"referrer": IntegrationProviderSlug.MSTEAMS.value}
if notification_uuid:
params.update({"notification_uuid": notification_uuid})
link = self.group.get_absolute_url(params=params)
title_text = f"[{text}]({link})"
return create_text_block(
title_text,
size=TextSize.LARGE,
weight=TextWeight.BOLDER,
)
def build_group_descr(self) -> TextBlock | None:
# TODO: implement with event as well
text = build_attachment_text(self.group)
if text:
return create_text_block(
text,
size=TextSize.MEDIUM,
weight=TextWeight.BOLDER,
)
return None
def get_timestamp(self) -> str:
ts: datetime = self.group.last_seen
date = max(ts, self.event.datetime) if self.event else ts
# Adaptive cards is strict about the isoformat.
date_str: str = date.replace(microsecond=0).isoformat()
return date_str
def create_date_block(self) -> TextBlock:
date_str = self.get_timestamp()
return create_text_block(
IssueConstants.DATE_FORMAT.format(date=date_str),
size=TextSize.SMALL,
weight=TextWeight.LIGHTER,
horizontalAlignment=ContentAlignment.CENTER,
wrap=False,
)
def build_group_footer(self) -> ColumnSetBlock:
project = Project.objects.get_from_cache(id=self.group.project_id)
image_column = create_footer_logo_block()
text = build_footer(
group=self.group,
project=project,
url_format=MSTEAMS_URL_FORMAT,
rules=self.rules,
)
text_column = create_footer_column_block(create_footer_text_block(text))
date_column = create_column_block(
self.create_date_block(),
verticalContentAlignment=ContentAlignment.CENTER,
)
return create_column_set_block(
create_column_block(image_column),
text_column,
date_column,
)
@staticmethod
def build_input_choice_card(
data: Any,
card_title: str,
input_id: str,
submit_button_title: str,
choices: Sequence[tuple[str, Any]],
default_choice: Any = None,
) -> AdaptiveCard:
return MSTeamsMessageBuilder().build(
title=create_text_block(card_title, weight=TextWeight.BOLDER),
text=create_input_choice_set_block(
id=input_id, choices=choices, default_choice=default_choice
),
actions=[SubmitAction(type=ActionType.SUBMIT, title=submit_button_title, data=data)],
)
def create_issue_action_block(
self,
toggled: bool,
action: ACTION_TYPE,
action_title: str,
reverse_action: ACTION_TYPE,
reverse_action_title: str,
**card_kwargs: Any,
) -> Action:
"""
Build an action block for a particular `action` (Resolve).
It could be one of the following depending on if the state is `toggled` (Resolved issue).
If the issue is `toggled` then present a button with the `reverse_action` (Unresolve).
If it is not `toggled` then present a button which reveals a card with options to
perform the action ([Immediately, In current release, ...])
"""
if toggled:
data = self.generate_action_payload(reverse_action)
return SubmitAction(type=ActionType.SUBMIT, title=reverse_action_title, data=data)
data = self.generate_action_payload(action)
card = self.build_input_choice_card(data=data, **card_kwargs)
return ShowCardAction(type=ActionType.SHOW_CARD, title=action_title, card=card)
def get_teams_choices(self) -> Sequence[tuple[str, str]]:
teams = self.group.project.teams.all().order_by("slug")
return [("Me", ME)] + [
(team["text"], team["value"]) for team in format_actor_options_non_slack(teams)
]
def build_group_actions(self) -> ContainerBlock:
status = self.group.get_status()
resolve_action = self.create_issue_action_block(
toggled=GroupStatus.RESOLVED == status,
action=ACTION_TYPE.RESOLVE,
action_title=IssueConstants.RESOLVE,
reverse_action=ACTION_TYPE.UNRESOLVE,
reverse_action_title=IssueConstants.UNRESOLVE,
# card_kwargs
card_title=IssueConstants.RESOLVE,
submit_button_title=IssueConstants.RESOLVE,
input_id=IssueConstants.RESOLVE_INPUT_ID,
choices=IssueConstants.RESOLVE_INPUT_CHOICES,
)
ignore_action = self.create_issue_action_block(
toggled=GroupStatus.IGNORED == status,
action=ACTION_TYPE.ARCHIVE,
action_title=IssueConstants.ARCHIVE,
reverse_action=ACTION_TYPE.UNRESOLVE,
reverse_action_title=IssueConstants.UNARCHIVE,
# card_kwargs
card_title=IssueConstants.ARCHIVE_INPUT_TITLE,
submit_button_title=IssueConstants.ARCHIVE,
input_id=IssueConstants.ARCHIVE_INPUT_ID,
choices=IssueConstants.ARCHIVE_INPUT_CHOICES,
)
teams_choices = self.get_teams_choices()
assign_action = self.create_issue_action_block(
toggled=self.group.get_assignee() is not None,
action=ACTION_TYPE.ASSIGN,
action_title=IssueConstants.ASSIGN,
reverse_action=ACTION_TYPE.UNASSIGN,
reverse_action_title=IssueConstants.UNASSIGN,
# card_kwargs
card_title=IssueConstants.ASSIGN_INPUT_TITLE,
submit_button_title=IssueConstants.ASSIGN,
input_id=IssueConstants.ASSIGN_INPUT_ID,
choices=teams_choices,
default_choice=ME,
)
logger.info(
"msteams.build_group_actions",
extra={
"group_id": self.group.id,
"project_id": self.group.project.id,
"organization": self.group.project.organization.id,
"ignore_action": ignore_action,
},
)
return create_container_block(
create_action_set_block(
resolve_action,
ignore_action,
assign_action,
)
)
def build_assignee_note(self) -> TextBlock | None:
assignee = self.group.get_assignee()
if assignee:
assignee_text = format_actor_option_non_slack(assignee)["text"]
return create_text_block(
IssueConstants.ASSIGNEE_NOTE.format(assignee=assignee_text),
size=TextSize.SMALL,
)
return None
def build_group_card(self, notification_uuid: str | None = None) -> AdaptiveCard:
    """
    The issue (group) card has the following components stacked vertically,
    1. The issue title which links to the issue.
    2. A description of the issue if it is available. (Optional)
    3. A footer block, which again has 3 components stacked horizontally,
        3a. The short id of the group.
        3b. The alert rule(s) that fired this notification.
        3c. The date and time of the event.
    4. Details of the assignee if the issue is assigned to an actor. (Optional)
    5. A set of three actions, resolve, ignore and assign which can
        further reveal cards with dropdowns for selecting options.
    """
    # Explicit typing to satisfy mypy.
    # NOTE(review): optional builders may return None; presumably
    # super().build() drops None entries — confirm.
    fields: list[Block | None] = [
        self.build_group_descr(),
        self.build_group_footer(),
        self.build_assignee_note(),
        self.build_group_actions(),
    ]
    return super().build(
        title=self.build_group_title(notification_uuid=notification_uuid),
        fields=fields,
    )
| MSTeamsIssueMessageBuilder |
python | django__django | tests/model_fields/test_jsonfield.py | {
"start": 6066,
"end": 7840
class ____(SimpleTestCase):
    """Round-trip JSONField values through the JSON and XML serializers."""

    # Serialized-fixture template; %s is replaced with the serialized value.
    test_data = (
        '[{"fields": {"value": %s}, "model": "model_fields.jsonmodel", "pk": null}]'
    )
    # Pairs covering a dict containing null, a plain string, and a string
    # that merely *looks* like JSON (must stay a string after round-trip).
    test_values = (
        # (Python value, serialized value),
        ({"a": "b", "c": None}, '{"a": "b", "c": null}'),
        ("abc", '"abc"'),
        ('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
    )

    def test_dumping(self):
        # Python value -> JSON serializer output.
        for value, serialized in self.test_values:
            with self.subTest(value=value):
                instance = JSONModel(value=value)
                data = serializers.serialize("json", [instance])
                self.assertJSONEqual(data, self.test_data % serialized)

    def test_loading(self):
        # JSON serializer output -> Python value.
        for value, serialized in self.test_values:
            with self.subTest(value=value):
                instance = list(
                    serializers.deserialize("json", self.test_data % serialized)
                )[0].object
                self.assertEqual(instance.value, value)

    def test_xml_serialization(self):
        # Full XML round-trip (serialize then deserialize) for the same values.
        test_xml_data = (
            '<django-objects version="1.0">'
            '<object model="model_fields.nullablejsonmodel">'
            '<field name="value" type="JSONField">%s'
            "</field></object></django-objects>"
        )
        for value, serialized in self.test_values:
            with self.subTest(value=value):
                instance = NullableJSONModel(value=value)
                data = serializers.serialize("xml", [instance], fields=["value"])
                self.assertXMLEqual(data, test_xml_data % serialized)
                new_instance = list(serializers.deserialize("xml", data))[0].object
                self.assertEqual(new_instance.value, instance.value)
@skipUnlessDBFeature("supports_json_field")
| TestSerialization |
python | graphql-python__graphene | graphene/types/tests/test_generic.py | {
"start": 102,
"end": 2226
class ____(ObjectType):
    """Query root exposing a single GenericScalar field that echoes its input."""

    generic = GenericScalar(input=GenericScalar())

    def resolve_generic(self, info, input=None):
        # Echo the argument unchanged so tests can compare input to output.
        return input


schema = Schema(query=Query)


def test_generic_query_variable():
    """GenericScalar round-trips every JSON-like value passed as a variable."""
    for generic_value in [
        1,
        1.1,
        True,
        "str",
        [1, 2, 3],
        [1.1, 2.2, 3.3],
        [True, False],
        ["str1", "str2"],
        {"key_a": "a", "key_b": "b"},
        {
            "int": 1,
            "float": 1.1,
            "boolean": True,
            "string": "str",
            "int_list": [1, 2, 3],
            "float_list": [1.1, 2.2, 3.3],
            "boolean_list": [True, False],
            "string_list": ["str1", "str2"],
            "nested_dict": {"key_a": "a", "key_b": "b"},
        },
        None,
    ]:
        result = schema.execute(
            """query Test($generic: GenericScalar){ generic(input: $generic) }""",
            variables={"generic": generic_value},
        )
        assert not result.errors
        assert result.data == {"generic": generic_value}


def test_generic_parse_literal_query():
    """GenericScalar parses inline literals, including `undefined` -> None."""
    result = schema.execute(
        """
        query {
            generic(input: {
                int: 1,
                float: 1.1
                boolean: true,
                string: "str",
                int_list: [1, 2, 3],
                float_list: [1.1, 2.2, 3.3],
                boolean_list: [true, false]
                string_list: ["str1", "str2"],
                nested_dict: {
                    key_a: "a",
                    key_b: "b"
                },
                empty_key: undefined
            })
        }
        """
    )
    assert not result.errors
    assert result.data == {
        "generic": {
            "int": 1,
            "float": 1.1,
            "boolean": True,
            "string": "str",
            "int_list": [1, 2, 3],
            "float_list": [1.1, 2.2, 3.3],
            "boolean_list": [True, False],
            "string_list": ["str1", "str2"],
            "nested_dict": {"key_a": "a", "key_b": "b"},
            "empty_key": None,
        }
    }
| Query |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/utils.py | {
"start": 2244,
"end": 6166
class ____(BufferedAsyncIterator):
    """
    Buffered async iterator for Git blobs.

    This class is an async iterator that buffers the results of the get_blob operation.
    It is used to retrieve the contents of the files in a Github repository.
    getBlob endpoint supports up to 100 megabytes of content for blobs.

    This concrete implementation of BufferedAsyncIterator allows you to lazily retrieve
    the contents of the files in a Github repository.
    Otherwise you would have to retrieve all the contents of
    the files in the repository at once, which would
    be problematic if the repository is large.
    """

    def __init__(
        self,
        blobs_and_paths: List[Tuple[GitTreeResponseModel.GitTreeObject, str]],
        github_client: GithubClient,
        owner: str,
        repo: str,
        loop: asyncio.AbstractEventLoop,
        buffer_size: int,
        verbose: bool = False,
        timeout: Optional[int] = 5,
        retries: int = 0,
    ):
        """
        Initialize params.

        Args:
            - blobs_and_paths (List[Tuple[GitTreeResponseModel.GitTreeObject, str]]):
                List of tuples containing the blob and the path of the file.
            - github_client (GithubClient): Github client.
            - owner (str): Owner of the repository.
            - repo (str): Name of the repository.
            - loop (asyncio.AbstractEventLoop): Event loop.
            - verbose (bool): Whether to print verbose messages.
            - buffer_size (int): Size of the buffer.
            - timeout (int or None): Timeout for the requests to the Github API. Default is 5.
            - retries (int): Number of retries for requests made to the Github API. Default is 0.
        """
        super().__init__(buffer_size)
        self._blobs_and_paths = blobs_and_paths
        self._github_client = github_client
        self._owner = owner
        self._repo = repo
        self._verbose = verbose
        self._timeout = timeout
        self._retries = retries
        # Fall back to the current event loop when none is supplied.
        # BUGFIX: asyncio.get_event_loop() never returns None (it raises on
        # failure), so the former `if loop is None: raise ValueError(...)`
        # re-check was unreachable dead code and has been removed.
        # NOTE(review): `loop` is not stored or used afterwards — the
        # parameter is kept only for interface compatibility; confirm whether
        # it is still needed.
        if loop is None:
            loop = asyncio.get_event_loop()

    async def _fill_buffer(self) -> None:
        """
        Fill the buffer with the results of the get_blob operation.

        The get_blob operation is called for each blob in the blobs_and_paths list.
        The blobs are retrieved in batches of size buffer_size.
        """
        # Clear in place first (so any external alias also sees an empty
        # list), then rebind to a fresh list for this batch.
        del self._buffer[:]
        self._buffer = []
        start = self._index
        end = min(start + self._buffer_size, len(self._blobs_and_paths))
        if start >= end:
            return

        if self._verbose:
            start_t = time.time()

        # Fetch the whole batch concurrently.
        results: List[Optional[GitBlobResponseModel]] = await asyncio.gather(
            *[
                self._github_client.get_blob(
                    self._owner,
                    self._repo,
                    blob.sha,
                    timeout=self._timeout,
                    retries=self._retries,
                )
                for blob, _ in self._blobs_and_paths[
                    start:end
                ]  # TODO: use batch_size instead of buffer_size for concurrent requests
            ]
        )
        # Drop None results (failed fetches).
        filtered_results = [result for result in results if result is not None]

        if self._verbose:
            end_t = time.time()
            blob_names_and_sizes = [
                (blob.path, blob.size) for blob, _ in self._blobs_and_paths[start:end]
            ]
            print(
                "Time to get blobs ("
                + f"{blob_names_and_sizes}"
                + f"): {end_t - start_t:.2f} seconds"
            )

        # Pair each successful result with its repository path.
        # NOTE(review): when some fetches failed, zipping the *filtered*
        # results against the *unfiltered* path slice can misalign results
        # and paths — confirm and fix upstream if get_blob can return None.
        self._buffer = [
            (result, path)
            for result, (_, path) in zip(
                filtered_results, self._blobs_and_paths[start:end]
            )
        ]
| BufferedGitBlobDataIterator |
python | getsentry__sentry | src/sentry/api/endpoints/project_filters.py | {
"start": 658,
"end": 797
class ____(TypedDict):
    # Stable identifier of the inbound data filter.
    id: str
    # Either a simple on/off flag or a list of enabled sub-option names —
    # presumably for filters with multiple selectable options; confirm
    # against the serializing endpoint.
    active: bool | list[str]
@region_silo_endpoint
@extend_schema(tags=["Projects"])
| ProjectFilterResponse |
python | jazzband__django-pipeline | tests/tests/test_storage.py | {
"start": 490,
"end": 896
class ____(PipelineStorage):
    """Storage backend whose ``path`` method is intentionally unavailable.

    All other operations are inert stand-ins: deletes and saves do nothing,
    every name "exists", opening yields an empty in-memory text stream, and
    directory listings are empty.
    """

    def path(self, *names):
        # Deliberately unsupported: this storage has no filesystem layout.
        raise NotImplementedError()

    def delete(self, *names):
        return None

    def exists(self, *names):
        return True

    def save(self, *names):
        return None

    def open(self, *names):
        return StringIO()

    def listdir(self, *names):
        return []
| PipelineNoPathStorage |
python | networkx__networkx | networkx/algorithms/tests/test_distance_regular.py | {
"start": 441,
"end": 2264
class ____:
    """Tests for distance-regular graph detection and intersection arrays."""

    def test_is_distance_regular(self):
        # Classic distance-regular graphs are recognized.
        assert nx.is_distance_regular(nx.icosahedral_graph())
        assert nx.is_distance_regular(nx.petersen_graph())
        assert nx.is_distance_regular(nx.cubical_graph())
        assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3))
        assert nx.is_distance_regular(nx.tetrahedral_graph())
        assert nx.is_distance_regular(nx.dodecahedral_graph())
        assert nx.is_distance_regular(nx.pappus_graph())
        assert nx.is_distance_regular(nx.heawood_graph())
        assert nx.is_distance_regular(nx.cycle_graph(3))
        # no distance regular
        assert not nx.is_distance_regular(nx.path_graph(4))

    def test_not_connected(self):
        # A disconnected graph (4-cycle plus separate 3-cycle) is never
        # distance-regular.
        G = nx.cycle_graph(4)
        nx.add_cycle(G, [5, 6, 7])
        assert not nx.is_distance_regular(G)

    def test_global_parameters(self):
        # Global parameters derived from the intersection array of cycles.
        b, c = nx.intersection_array(nx.cycle_graph(5))
        g = nx.global_parameters(b, c)
        assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)]
        b, c = nx.intersection_array(nx.cycle_graph(3))
        g = nx.global_parameters(b, c)
        assert list(g) == [(0, 0, 2), (1, 1, 0)]

    def test_intersection_array(self):
        # Known (b, c) intersection arrays for standard graphs.
        b, c = nx.intersection_array(nx.cycle_graph(5))
        assert b == [2, 1]
        assert c == [1, 1]
        b, c = nx.intersection_array(nx.dodecahedral_graph())
        assert b == [3, 2, 1, 1, 1]
        assert c == [1, 1, 1, 2, 3]
        b, c = nx.intersection_array(nx.icosahedral_graph())
        assert b == [5, 2, 1]
        assert c == [1, 2, 5]
@pytest.mark.parametrize("f", (nx.is_distance_regular, nx.is_strongly_regular))
def test_empty_graph_raises(f):
    """Both predicates raise on the null graph instead of returning a bool."""
    G = nx.Graph()
    with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes"):
        f(G)
| TestDistanceRegular |
python | plotly__plotly.py | plotly/graph_objs/choroplethmapbox/_legendgrouptitle.py | {
"start": 233,
"end": 3003
class ____(_BaseTraceHierarchyType):
    """Legend-group title container for the ``choroplethmapbox`` trace.

    Generated plotly graph object holding exactly two properties:
    the title ``text`` and its ``font``.
    """

    # Location of this node within the figure property hierarchy.
    _parent_path_str = "choroplethmapbox"
    _path_str = "choroplethmapbox.legendgrouptitle"
    # The only property names this object accepts.
    _valid_props = {"font", "text"}

    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.choroplethmapbox.legendgrouptitle.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

        Returns
        -------
        plotly.graph_objs.choroplethmapbox.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.choroplethmapb
            ox.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super().__init__("legendgrouptitle")
        # Internal fast path: a parent object constructs children by passing
        # itself via kwargs; skip all validation in that case.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict of properties.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmapbox.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.Legendgrouptitle`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in ``arg``.
        self._set_property("font", arg, font)
        self._set_property("text", arg, text)

        # Apply any remaining properties from arg/kwargs.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
| Legendgrouptitle |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_datetime.py | {
"start": 110,
"end": 32647
} | class ____:
@mpl.style.context("default")
def test_annotate(self):
mpl.rcParams["date.converter"] = 'concise'
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, layout="constrained")
start_date = datetime.datetime(2023, 10, 1)
dates = [start_date + datetime.timedelta(days=i) for i in range(31)]
data = list(range(1, 32))
test_text = "Test Text"
ax1.plot(dates, data)
ax1.annotate(text=test_text, xy=(dates[15], data[15]))
ax2.plot(data, dates)
ax2.annotate(text=test_text, xy=(data[5], dates[26]))
ax3.plot(dates, dates)
ax3.annotate(text=test_text, xy=(dates[15], dates[3]))
ax4.plot(dates, dates)
ax4.annotate(text=test_text, xy=(dates[5], dates[30]),
xytext=(dates[1], dates[7]), arrowprops=dict(facecolor='red'))
@pytest.mark.xfail(reason="Test for arrow not written yet")
@mpl.style.context("default")
def test_arrow(self):
fig, ax = plt.subplots()
ax.arrow(...)
@mpl.style.context("default")
def test_axhline(self):
mpl.rcParams["date.converter"] = 'concise'
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout='constrained')
ax1.set_ylim(bottom=datetime.datetime(2020, 4, 1),
top=datetime.datetime(2020, 8, 1))
ax2.set_ylim(bottom=np.datetime64('2005-01-01'),
top=np.datetime64('2005-04-01'))
ax3.set_ylim(bottom=datetime.datetime(2023, 9, 1),
top=datetime.datetime(2023, 11, 1))
ax1.axhline(y=datetime.datetime(2020, 6, 3), xmin=0.5, xmax=0.7)
ax2.axhline(np.datetime64('2005-02-25T03:30'), xmin=0.1, xmax=0.9)
ax3.axhline(y=datetime.datetime(2023, 10, 24), xmin=0.4, xmax=0.7)
@mpl.style.context("default")
def test_axhspan(self):
mpl.rcParams["date.converter"] = 'concise'
start_date = datetime.datetime(2023, 1, 1)
dates = [start_date + datetime.timedelta(days=i) for i in range(31)]
numbers = list(range(1, 32))
fig, (ax1, ax2, ax3) = plt.subplots(3, 1,
constrained_layout=True,
figsize=(10, 12))
ax1.plot(dates, numbers, marker='o', color='blue')
for i in range(0, 31, 2):
ax1.axhspan(ymin=i+1, ymax=i+2, facecolor='green', alpha=0.5)
ax1.set_title('Datetime vs. Number')
ax1.set_xlabel('Date')
ax1.set_ylabel('Number')
ax2.plot(numbers, dates, marker='o', color='blue')
for i in range(0, 31, 2):
ymin = start_date + datetime.timedelta(days=i)
ymax = ymin + datetime.timedelta(days=1)
ax2.axhspan(ymin=ymin, ymax=ymax, facecolor='green', alpha=0.5)
ax2.set_title('Number vs. Datetime')
ax2.set_xlabel('Number')
ax2.set_ylabel('Date')
ax3.plot(dates, dates, marker='o', color='blue')
for i in range(0, 31, 2):
ymin = start_date + datetime.timedelta(days=i)
ymax = ymin + datetime.timedelta(days=1)
ax3.axhspan(ymin=ymin, ymax=ymax, facecolor='green', alpha=0.5)
ax3.set_title('Datetime vs. Datetime')
ax3.set_xlabel('Date')
ax3.set_ylabel('Date')
@pytest.mark.xfail(reason="Test for axline not written yet")
@mpl.style.context("default")
def test_axline(self):
fig, ax = plt.subplots()
ax.axline(...)
@mpl.style.context("default")
def test_axvline(self):
mpl.rcParams["date.converter"] = 'concise'
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout='constrained')
ax1.set_xlim(left=datetime.datetime(2020, 4, 1),
right=datetime.datetime(2020, 8, 1))
ax2.set_xlim(left=np.datetime64('2005-01-01'),
right=np.datetime64('2005-04-01'))
ax3.set_xlim(left=datetime.datetime(2023, 9, 1),
right=datetime.datetime(2023, 11, 1))
ax1.axvline(x=datetime.datetime(2020, 6, 3), ymin=0.5, ymax=0.7)
ax2.axvline(np.datetime64('2005-02-25T03:30'), ymin=0.1, ymax=0.9)
ax3.axvline(x=datetime.datetime(2023, 10, 24), ymin=0.4, ymax=0.7)
@mpl.style.context("default")
def test_axvspan(self):
mpl.rcParams["date.converter"] = 'concise'
start_date = datetime.datetime(2023, 1, 1)
dates = [start_date + datetime.timedelta(days=i) for i in range(31)]
numbers = list(range(1, 32))
fig, (ax1, ax2, ax3) = plt.subplots(3, 1,
constrained_layout=True,
figsize=(10, 12))
ax1.plot(dates, numbers, marker='o', color='blue')
for i in range(0, 31, 2):
xmin = start_date + datetime.timedelta(days=i)
xmax = xmin + datetime.timedelta(days=1)
ax1.axvspan(xmin=xmin, xmax=xmax, facecolor='red', alpha=0.5)
ax1.set_title('Datetime vs. Number')
ax1.set_xlabel('Date')
ax1.set_ylabel('Number')
ax2.plot(numbers, dates, marker='o', color='blue')
for i in range(0, 31, 2):
ax2.axvspan(xmin=i+1, xmax=i+2, facecolor='red', alpha=0.5)
ax2.set_title('Number vs. Datetime')
ax2.set_xlabel('Number')
ax2.set_ylabel('Date')
ax3.plot(dates, dates, marker='o', color='blue')
for i in range(0, 31, 2):
xmin = start_date + datetime.timedelta(days=i)
xmax = xmin + datetime.timedelta(days=1)
ax3.axvspan(xmin=xmin, xmax=xmax, facecolor='red', alpha=0.5)
ax3.set_title('Datetime vs. Datetime')
ax3.set_xlabel('Date')
ax3.set_ylabel('Date')
@mpl.style.context("default")
def test_bar(self):
mpl.rcParams["date.converter"] = "concise"
fig, (ax1, ax2) = plt.subplots(2, 1, layout="constrained")
x_dates = np.array(
[
datetime.datetime(2020, 6, 30),
datetime.datetime(2020, 7, 22),
datetime.datetime(2020, 8, 3),
datetime.datetime(2020, 9, 14),
],
dtype=np.datetime64,
)
x_ranges = [8800, 2600, 8500, 7400]
x = np.datetime64(datetime.datetime(2020, 6, 1))
ax1.bar(x_dates, x_ranges, width=np.timedelta64(4, "D"))
ax2.bar(np.arange(4), x_dates - x, bottom=x)
@mpl.style.context("default")
def test_bar_label(self):
# Generate some example data with dateTime inputs
date_list = [datetime.datetime(2023, 1, 1) +
datetime.timedelta(days=i) for i in range(5)]
values = [10, 20, 15, 25, 30]
# Creating the plot
fig, ax = plt.subplots(1, 1, figsize=(10, 8), layout='constrained')
bars = ax.bar(date_list, values)
# Add labels to the bars using bar_label
ax.bar_label(bars, labels=[f'{val}%' for val in values],
label_type='edge', color='black')
@mpl.style.context("default")
def test_barbs(self):
plt.rcParams["date.converter"] = 'concise'
start_date = datetime.datetime(2022, 2, 8, 22)
dates = [start_date + datetime.timedelta(hours=i) for i in range(12)]
numbers = np.sin(np.linspace(0, 2 * np.pi, 12))
u = np.ones(12) * 10
v = np.arange(0, 120, 10)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
axes[0].barbs(dates, numbers, u, v, length=7)
axes[0].set_title('Datetime vs. Numeric Data')
axes[0].set_xlabel('Datetime')
axes[0].set_ylabel('Numeric Data')
axes[1].barbs(numbers, dates, u, v, length=7)
axes[1].set_title('Numeric vs. Datetime Data')
axes[1].set_xlabel('Numeric Data')
axes[1].set_ylabel('Datetime')
@mpl.style.context("default")
def test_barh(self):
mpl.rcParams["date.converter"] = 'concise'
fig, (ax1, ax2) = plt.subplots(2, 1, layout='constrained')
birth_date = np.array([datetime.datetime(2020, 4, 10),
datetime.datetime(2020, 5, 30),
datetime.datetime(2020, 10, 12),
datetime.datetime(2020, 11, 15)])
year_start = datetime.datetime(2020, 1, 1)
year_end = datetime.datetime(2020, 12, 31)
age = [21, 53, 20, 24]
ax1.set_xlabel('Age')
ax1.set_ylabel('Birth Date')
ax1.barh(birth_date, width=age, height=datetime.timedelta(days=10))
ax2.set_xlim(left=year_start, right=year_end)
ax2.set_xlabel('Birth Date')
ax2.set_ylabel('Order of Birth Dates')
ax2.barh(np.arange(4), birth_date-year_start, left=year_start)
@pytest.mark.xfail(reason="Test for boxplot not written yet")
@mpl.style.context("default")
def test_boxplot(self):
fig, ax = plt.subplots()
ax.boxplot(...)
@mpl.style.context("default")
def test_broken_barh(self):
# Horizontal bar plot with gaps
mpl.rcParams["date.converter"] = 'concise'
fig, ax = plt.subplots()
ax.broken_barh([(datetime.datetime(2023, 1, 4), datetime.timedelta(days=2)),
(datetime.datetime(2023, 1, 8), datetime.timedelta(days=3))],
(10, 9), facecolors='tab:blue')
ax.broken_barh([(datetime.datetime(2023, 1, 2), datetime.timedelta(days=1)),
(datetime.datetime(2023, 1, 4), datetime.timedelta(days=4))],
(20, 9), facecolors=('tab:red'))
@mpl.style.context("default")
def test_bxp(self):
mpl.rcParams["date.converter"] = 'concise'
fig, ax = plt.subplots()
data = [{
"med": datetime.datetime(2020, 1, 15),
"q1": datetime.datetime(2020, 1, 10),
"q3": datetime.datetime(2020, 1, 20),
"whislo": datetime.datetime(2020, 1, 5),
"whishi": datetime.datetime(2020, 1, 25),
"fliers": [
datetime.datetime(2020, 1, 3),
datetime.datetime(2020, 1, 27)
]
}]
ax.bxp(data, orientation='horizontal')
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter("%Y-%m-%d"))
ax.set_title('Box plot with datetime data')
@pytest.mark.xfail(reason="Test for clabel not written yet")
@mpl.style.context("default")
def test_clabel(self):
fig, ax = plt.subplots()
ax.clabel(...)
@mpl.style.context("default")
def test_contour(self):
mpl.rcParams["date.converter"] = "concise"
range_threshold = 10
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout="constrained")
x_dates = np.array(
[datetime.datetime(2023, 10, delta) for delta in range(1, range_threshold)]
)
y_dates = np.array(
[datetime.datetime(2023, 10, delta) for delta in range(1, range_threshold)]
)
x_ranges = np.array(range(1, range_threshold))
y_ranges = np.array(range(1, range_threshold))
X_dates, Y_dates = np.meshgrid(x_dates, y_dates)
X_ranges, Y_ranges = np.meshgrid(x_ranges, y_ranges)
Z_ranges = np.cos(X_ranges / 4) + np.sin(Y_ranges / 4)
ax1.contour(X_dates, Y_dates, Z_ranges)
ax2.contour(X_dates, Y_ranges, Z_ranges)
ax3.contour(X_ranges, Y_dates, Z_ranges)
@mpl.style.context("default")
def test_contourf(self):
mpl.rcParams["date.converter"] = "concise"
range_threshold = 10
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout="constrained")
x_dates = np.array(
[datetime.datetime(2023, 10, delta) for delta in range(1, range_threshold)]
)
y_dates = np.array(
[datetime.datetime(2023, 10, delta) for delta in range(1, range_threshold)]
)
x_ranges = np.array(range(1, range_threshold))
y_ranges = np.array(range(1, range_threshold))
X_dates, Y_dates = np.meshgrid(x_dates, y_dates)
X_ranges, Y_ranges = np.meshgrid(x_ranges, y_ranges)
Z_ranges = np.cos(X_ranges / 4) + np.sin(Y_ranges / 4)
ax1.contourf(X_dates, Y_dates, Z_ranges)
ax2.contourf(X_dates, Y_ranges, Z_ranges)
ax3.contourf(X_ranges, Y_dates, Z_ranges)
@mpl.style.context("default")
def test_errorbar(self):
mpl.rcParams["date.converter"] = "concise"
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, layout="constrained")
limit = 7
start_date = datetime.datetime(2023, 1, 1)
x_dates = np.array([datetime.datetime(2023, 10, d) for d in range(1, limit)])
y_dates = np.array([datetime.datetime(2023, 10, d) for d in range(1, limit)])
x_date_error = datetime.timedelta(days=1)
y_date_error = datetime.timedelta(days=1)
x_values = list(range(1, limit))
y_values = list(range(1, limit))
x_value_error = 0.5
y_value_error = 0.5
ax1.errorbar(x_dates, y_values,
yerr=y_value_error,
capsize=10,
barsabove=True,
label='Data')
ax2.errorbar(x_values, y_dates,
xerr=x_value_error, yerr=y_date_error,
errorevery=(1, 2),
fmt='-o', label='Data')
ax3.errorbar(x_dates, y_dates,
xerr=x_date_error, yerr=y_date_error,
lolims=True, xlolims=True,
label='Data')
ax4.errorbar(x_dates, y_values,
xerr=x_date_error, yerr=y_value_error,
uplims=True, xuplims=True,
label='Data')
@mpl.style.context("default")
def test_eventplot(self):
mpl.rcParams["date.converter"] = "concise"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout="constrained")
x_dates1 = np.array([datetime.datetime(2020, 6, 30),
datetime.datetime(2020, 7, 22),
datetime.datetime(2020, 8, 3),
datetime.datetime(2020, 9, 14),],
dtype=np.datetime64,
)
ax1.eventplot(x_dates1)
np.random.seed(19680801)
start_date = datetime.datetime(2020, 7, 1)
end_date = datetime.datetime(2020, 10, 15)
date_range = end_date - start_date
dates1 = start_date + np.random.rand(30) * date_range
dates2 = start_date + np.random.rand(10) * date_range
dates3 = start_date + np.random.rand(50) * date_range
colors1 = ['C1', 'C2', 'C3']
lineoffsets1 = np.array([1, 6, 8])
linelengths1 = [5, 2, 3]
ax2.eventplot([dates1, dates2, dates3],
colors=colors1,
lineoffsets=lineoffsets1,
linelengths=linelengths1)
lineoffsets2 = np.array([
datetime.datetime(2020, 7, 1),
datetime.datetime(2020, 7, 15),
datetime.datetime(2020, 8, 1)
], dtype=np.datetime64)
ax3.eventplot([dates1, dates2, dates3],
colors=colors1,
lineoffsets=lineoffsets2,
linelengths=linelengths1)
@mpl.style.context("default")
def test_fill(self):
mpl.rcParams["date.converter"] = "concise"
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, layout="constrained")
np.random.seed(19680801)
x_base_date = datetime.datetime(2023, 1, 1)
x_dates = [x_base_date]
for _ in range(1, 5):
x_base_date += datetime.timedelta(days=np.random.randint(1, 5))
x_dates.append(x_base_date)
y_base_date = datetime.datetime(2023, 1, 1)
y_dates = [y_base_date]
for _ in range(1, 5):
y_base_date += datetime.timedelta(days=np.random.randint(1, 5))
y_dates.append(y_base_date)
x_values = np.random.rand(5) * 5
y_values = np.random.rand(5) * 5 - 2
ax1.fill(x_dates, y_values)
ax2.fill(x_values, y_dates)
ax3.fill(x_values, y_values)
ax4.fill(x_dates, y_dates)
@mpl.style.context("default")
def test_fill_between(self):
mpl.rcParams["date.converter"] = "concise"
np.random.seed(19680801)
y_base_date = datetime.datetime(2023, 1, 1)
y_dates1 = [y_base_date]
for i in range(1, 10):
y_base_date += datetime.timedelta(days=np.random.randint(1, 5))
y_dates1.append(y_base_date)
y_dates2 = [y_base_date]
for i in range(1, 10):
y_base_date += datetime.timedelta(days=np.random.randint(1, 5))
y_dates2.append(y_base_date)
x_values = np.random.rand(10) * 10
x_values.sort()
y_values1 = np.random.rand(10) * 10
y_values2 = y_values1 + np.random.rand(10) * 10
y_values1.sort()
y_values2.sort()
x_base_date = datetime.datetime(2023, 1, 1)
x_dates = [x_base_date]
for i in range(1, 10):
x_base_date += datetime.timedelta(days=np.random.randint(1, 10))
x_dates.append(x_base_date)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout="constrained")
ax1.fill_between(x_values, y_dates1, y_dates2)
ax2.fill_between(x_dates, y_values1, y_values2)
ax3.fill_between(x_dates, y_dates1, y_dates2)
@mpl.style.context("default")
def test_fill_betweenx(self):
mpl.rcParams["date.converter"] = "concise"
np.random.seed(19680801)
x_base_date = datetime.datetime(2023, 1, 1)
x_dates1 = [x_base_date]
for i in range(1, 10):
x_base_date += datetime.timedelta(days=np.random.randint(1, 5))
x_dates1.append(x_base_date)
x_dates2 = [x_base_date]
for i in range(1, 10):
x_base_date += datetime.timedelta(days=np.random.randint(1, 5))
x_dates2.append(x_base_date)
y_values = np.random.rand(10) * 10
y_values.sort()
x_values1 = np.random.rand(10) * 10
x_values2 = x_values1 + np.random.rand(10) * 10
x_values1.sort()
x_values2.sort()
y_base_date = datetime.datetime(2023, 1, 1)
y_dates = [y_base_date]
for i in range(1, 10):
y_base_date += datetime.timedelta(days=np.random.randint(1, 10))
y_dates.append(y_base_date)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, layout="constrained")
ax1.fill_betweenx(y_values, x_dates1, x_dates2)
ax2.fill_betweenx(y_dates, x_values1, x_values2)
ax3.fill_betweenx(y_dates, x_dates1, x_dates2)
@pytest.mark.xfail(reason="Test for hexbin not written yet")
@mpl.style.context("default")
def test_hexbin(self):
fig, ax = plt.subplots()
ax.hexbin(...)
@mpl.style.context("default")
def test_hist(self):
mpl.rcParams["date.converter"] = 'concise'
start_date = datetime.datetime(2023, 10, 1)
time_delta = datetime.timedelta(days=1)
values1 = np.random.randint(1, 10, 30)
values2 = np.random.randint(1, 10, 30)
values3 = np.random.randint(1, 10, 30)
bin_edges = [start_date + i * time_delta for i in range(31)]
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, constrained_layout=True)
ax1.hist(
[start_date + i * time_delta for i in range(30)],
bins=10,
weights=values1
)
ax2.hist(
[start_date + i * time_delta for i in range(30)],
bins=10,
weights=values2
)
ax3.hist(
[start_date + i * time_delta for i in range(30)],
bins=10,
weights=values3
)
fig, (ax4, ax5, ax6) = plt.subplots(3, 1, constrained_layout=True)
ax4.hist(
[start_date + i * time_delta for i in range(30)],
bins=bin_edges,
weights=values1
)
ax5.hist(
[start_date + i * time_delta for i in range(30)],
bins=bin_edges,
weights=values2
)
ax6.hist(
[start_date + i * time_delta for i in range(30)],
bins=bin_edges,
weights=values3
)
@pytest.mark.xfail(reason="Test for hist2d not written yet")
@mpl.style.context("default")
def test_hist2d(self):
fig, ax = plt.subplots()
ax.hist2d(...)
@mpl.style.context("default")
def test_hlines(self):
mpl.rcParams["date.converter"] = 'concise'
fig, axs = plt.subplots(2, 4, layout='constrained')
dateStrs = ['2023-03-08',
'2023-04-09',
'2023-05-13',
'2023-07-28',
'2023-12-24']
dates = [datetime.datetime(2023, m*2, 10) for m in range(1, 6)]
date_start = [datetime.datetime(2023, 6, d) for d in range(5, 30, 5)]
date_end = [datetime.datetime(2023, 7, d) for d in range(5, 30, 5)]
npDates = [np.datetime64(s) for s in dateStrs]
axs[0, 0].hlines(y=dates,
xmin=[0.1, 0.2, 0.3, 0.4, 0.5],
xmax=[0.5, 0.6, 0.7, 0.8, 0.9])
axs[0, 1].hlines(dates,
xmin=datetime.datetime(2020, 5, 10),
xmax=datetime.datetime(2020, 5, 31))
axs[0, 2].hlines(dates,
xmin=date_start,
xmax=date_end)
axs[0, 3].hlines(dates,
xmin=0.45,
xmax=0.65)
axs[1, 0].hlines(y=npDates,
xmin=[0.5, 0.6, 0.7, 0.8, 0.9],
xmax=[0.1, 0.2, 0.3, 0.4, 0.5])
axs[1, 2].hlines(y=npDates,
xmin=date_start,
xmax=date_end)
axs[1, 1].hlines(npDates,
xmin=datetime.datetime(2020, 5, 10),
xmax=datetime.datetime(2020, 5, 31))
axs[1, 3].hlines(npDates,
xmin=0.45,
xmax=0.65)
@mpl.style.context("default")
def test_imshow(self):
fig, ax = plt.subplots()
a = np.diag(range(5))
dt_start = datetime.datetime(2010, 11, 1)
dt_end = datetime.datetime(2010, 11, 11)
extent = (dt_start, dt_end, dt_start, dt_end)
ax.imshow(a, extent=extent)
ax.tick_params(axis="x", labelrotation=90)
@pytest.mark.xfail(reason="Test for loglog not written yet")
@mpl.style.context("default")
def test_loglog(self):
fig, ax = plt.subplots()
ax.loglog(...)
@mpl.style.context("default")
def test_matshow(self):
a = np.diag(range(5))
dt_start = datetime.datetime(1980, 4, 15)
dt_end = datetime.datetime(2020, 11, 11)
extent = (dt_start, dt_end, dt_start, dt_end)
fig, ax = plt.subplots()
ax.matshow(a, extent=extent)
for label in ax.get_xticklabels():
label.set_rotation(90)
@pytest.mark.xfail(reason="Test for pcolor not written yet")
@mpl.style.context("default")
def test_pcolor(self):
fig, ax = plt.subplots()
ax.pcolor(...)
@pytest.mark.xfail(reason="Test for pcolorfast not written yet")
@mpl.style.context("default")
def test_pcolorfast(self):
fig, ax = plt.subplots()
ax.pcolorfast(...)
@pytest.mark.xfail(reason="Test for pcolormesh not written yet")
@mpl.style.context("default")
def test_pcolormesh(self):
fig, ax = plt.subplots()
ax.pcolormesh(...)
@mpl.style.context("default")
def test_plot(self):
mpl.rcParams["date.converter"] = 'concise'
N = 6
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout='constrained')
x = np.array([datetime.datetime(2023, 9, n) for n in range(1, N)])
ax1.plot(x, range(1, N))
ax2.plot(range(1, N), x)
ax3.plot(x, x)
@pytest.mark.xfail(reason="Test for quiver not written yet")
@mpl.style.context("default")
def test_quiver(self):
fig, ax = plt.subplots()
ax.quiver(...)
@mpl.style.context("default")
def test_scatter(self):
mpl.rcParams["date.converter"] = 'concise'
base = datetime.datetime(2005, 2, 1)
dates = [base + datetime.timedelta(hours=(2 * i)) for i in range(10)]
N = len(dates)
np.random.seed(19680801)
y = np.cumsum(np.random.randn(N))
fig, axs = plt.subplots(3, 1, layout='constrained', figsize=(6, 6))
# datetime array on x axis
axs[0].scatter(dates, y)
for label in axs[0].get_xticklabels():
label.set_rotation(40)
label.set_horizontalalignment('right')
# datetime on y axis
axs[1].scatter(y, dates)
# datetime on both x, y axes
axs[2].scatter(dates, dates)
for label in axs[2].get_xticklabels():
label.set_rotation(40)
label.set_horizontalalignment('right')
@pytest.mark.xfail(reason="Test for semilogx not written yet")
@mpl.style.context("default")
def test_semilogx(self):
fig, ax = plt.subplots()
ax.semilogx(...)
@pytest.mark.xfail(reason="Test for semilogy not written yet")
@mpl.style.context("default")
def test_semilogy(self):
fig, ax = plt.subplots()
ax.semilogy(...)
@mpl.style.context("default")
def test_stackplot(self):
mpl.rcParams["date.converter"] = 'concise'
N = 10
stacked_nums = np.tile(np.arange(1, N), (4, 1))
dates = np.array([datetime.datetime(2020 + i, 1, 1) for i in range(N - 1)])
fig, ax = plt.subplots(layout='constrained')
ax.stackplot(dates, stacked_nums)
@mpl.style.context("default")
def test_stairs(self):
mpl.rcParams["date.converter"] = 'concise'
start_date = datetime.datetime(2023, 12, 1)
time_delta = datetime.timedelta(days=1)
baseline_date = datetime.datetime(1980, 1, 1)
bin_edges = [start_date + i * time_delta for i in range(31)]
edge_int = np.arange(31)
np.random.seed(123456)
values1 = np.random.randint(1, 100, 30)
values2 = [start_date + datetime.timedelta(days=int(i))
for i in np.random.randint(1, 10000, 30)]
values3 = [start_date + datetime.timedelta(days=int(i))
for i in np.random.randint(-10000, 10000, 30)]
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, constrained_layout=True)
ax1.stairs(values1, edges=bin_edges)
ax2.stairs(values2, edges=edge_int, baseline=baseline_date)
ax3.stairs(values3, edges=bin_edges, baseline=baseline_date)
@mpl.style.context("default")
def test_stem(self):
mpl.rcParams["date.converter"] = "concise"
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, layout="constrained")
limit_value = 10
above = datetime.datetime(2023, 9, 18)
below = datetime.datetime(2023, 11, 18)
x_ranges = np.arange(1, limit_value)
y_ranges = np.arange(1, limit_value)
x_dates = np.array(
[datetime.datetime(2023, 10, n) for n in range(1, limit_value)]
)
y_dates = np.array(
[datetime.datetime(2023, 10, n) for n in range(1, limit_value)]
)
ax1.stem(x_dates, y_dates, bottom=above)
ax2.stem(x_dates, y_ranges, bottom=5)
ax3.stem(x_ranges, y_dates, bottom=below)
ax4.stem(x_ranges, y_dates, orientation="horizontal", bottom=above)
ax5.stem(x_dates, y_ranges, orientation="horizontal", bottom=5)
ax6.stem(x_ranges, y_dates, orientation="horizontal", bottom=below)
@mpl.style.context("default")
def test_step(self):
mpl.rcParams["date.converter"] = "concise"
N = 6
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout='constrained')
x = np.array([datetime.datetime(2023, 9, n) for n in range(1, N)])
ax1.step(x, range(1, N))
ax2.step(range(1, N), x)
ax3.step(x, x)
    @pytest.mark.xfail(reason="Test for streamplot not written yet")
    @mpl.style.context("default")
    def test_streamplot(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper streamplot test is written.
        fig, ax = plt.subplots()
        ax.streamplot(...)
@mpl.style.context("default")
def test_text(self):
mpl.rcParams["date.converter"] = 'concise'
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout="constrained")
limit_value = 10
font_properties = {'family': 'serif', 'size': 12, 'weight': 'bold'}
test_date = datetime.datetime(2023, 10, 1)
x_data = np.array(range(1, limit_value))
y_data = np.array(range(1, limit_value))
x_dates = np.array(
[datetime.datetime(2023, 10, n) for n in range(1, limit_value)]
)
y_dates = np.array(
[datetime.datetime(2023, 10, n) for n in range(1, limit_value)]
)
ax1.plot(x_dates, y_data)
ax1.text(test_date, 5, "Inserted Text", **font_properties)
ax2.plot(x_data, y_dates)
ax2.text(7, test_date, "Inserted Text", **font_properties)
ax3.plot(x_dates, y_dates)
ax3.text(test_date, test_date, "Inserted Text", **font_properties)
    @pytest.mark.xfail(reason="Test for tricontour not written yet")
    @mpl.style.context("default")
    def test_tricontour(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper tricontour test is written.
        fig, ax = plt.subplots()
        ax.tricontour(...)
    @pytest.mark.xfail(reason="Test for tricontourf not written yet")
    @mpl.style.context("default")
    def test_tricontourf(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper tricontourf test is written.
        fig, ax = plt.subplots()
        ax.tricontourf(...)
    @pytest.mark.xfail(reason="Test for tripcolor not written yet")
    @mpl.style.context("default")
    def test_tripcolor(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper tripcolor test is written.
        fig, ax = plt.subplots()
        ax.tripcolor(...)
    @pytest.mark.xfail(reason="Test for triplot not written yet")
    @mpl.style.context("default")
    def test_triplot(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper triplot test is written.
        fig, ax = plt.subplots()
        ax.triplot(...)
@pytest.mark.parametrize("orientation", ["vertical", "horizontal"])
@mpl.style.context("default")
def test_violin(self, orientation):
fig, ax = plt.subplots()
datetimes = [
datetime.datetime(2023, 2, 10),
datetime.datetime(2023, 5, 18),
datetime.datetime(2023, 6, 6)
]
ax.violin(
[
{
'coords': datetimes,
'vals': [0.1, 0.5, 0.2],
'mean': datetimes[1],
'median': datetimes[1],
'min': datetimes[0],
'max': datetimes[-1],
'quantiles': datetimes
}
],
orientation=orientation,
# TODO: It should be possible for positions to be datetimes too
# https://github.com/matplotlib/matplotlib/issues/30417
# positions=[datetime.datetime(2020, 1, 1)]
)
    @pytest.mark.xfail(reason="Test for violinplot not written yet")
    @mpl.style.context("default")
    def test_violinplot(self):
        # Placeholder stub (marked xfail): replace the Ellipsis with real
        # datetime arguments once a proper violinplot test is written.
        fig, ax = plt.subplots()
        ax.violinplot(...)
    @mpl.style.context("default")
    def test_vlines(self):
        """Smoke test: vlines() with datetimes as x positions and as y limits."""
        mpl.rcParams["date.converter"] = 'concise'
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, layout='constrained')
        # Datetime x positions with numeric y limits.
        ax1.set_xlim(left=datetime.datetime(2023, 1, 1),
                     right=datetime.datetime(2023, 6, 30))
        ax1.vlines(x=[datetime.datetime(2023, 2, 10),
                      datetime.datetime(2023, 5, 18),
                      datetime.datetime(2023, 6, 6)],
                   ymin=[0, 0.25, 0.5],
                   ymax=[0.25, 0.5, 0.75])
        # Numeric x positions with np.datetime64 y limits.
        ax2.set_xlim(left=0,
                     right=0.5)
        ax2.vlines(x=[0.3, 0.35],
                   ymin=[np.datetime64('2023-03-20'), np.datetime64('2023-03-31')],
                   ymax=[np.datetime64('2023-05-01'), np.datetime64('2023-05-16')])
        # Datetime x positions with scalar datetime y limits shared by all lines.
        ax3.set_xlim(left=datetime.datetime(2023, 7, 1),
                     right=datetime.datetime(2023, 12, 31))
        ax3.vlines(x=[datetime.datetime(2023, 9, 1), datetime.datetime(2023, 12, 10)],
                   ymin=datetime.datetime(2023, 1, 15),
                   ymax=datetime.datetime(2023, 1, 30))
| TestDatetimePlotting |
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | {
"start": 59650,
"end": 64537
} | class ____(PreTrainedModel):
config: FastSpeech2ConformerHifiGanConfig
main_input_name = "spectrogram"
def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
super().__init__(config)
self.num_kernels = len(config.resblock_kernel_sizes)
self.num_upsamples = len(config.upsample_rates)
self.conv_pre = nn.Conv1d(
config.model_in_dim,
config.upsample_initial_channel,
kernel_size=7,
stride=1,
padding=3,
)
self.upsampler = nn.ModuleList()
for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
self.upsampler.append(
nn.ConvTranspose1d(
config.upsample_initial_channel // (2**i),
config.upsample_initial_channel // (2 ** (i + 1)),
kernel_size=kernel_size,
stride=upsample_rate,
padding=(kernel_size - upsample_rate) // 2,
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.upsampler)):
channels = config.upsample_initial_channel // (2 ** (i + 1))
for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
self.register_buffer("mean", torch.zeros(config.model_in_dim))
self.register_buffer("scale", torch.ones(config.model_in_dim))
# Initialize weights and apply final processing
self.post_init()
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.conv_pre)
for layer in self.upsampler:
weight_norm(layer)
for layer in self.resblocks:
layer.apply_weight_norm()
weight_norm(self.conv_post)
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv_pre)
for layer in self.upsampler:
nn.utils.remove_weight_norm(layer)
for layer in self.resblocks:
layer.remove_weight_norm()
nn.utils.remove_weight_norm(self.conv_post)
@auto_docstring(
custom_intro="""
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
"""
)
def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
r"""
spectrogram (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
"""
if self.config.normalize_before:
spectrogram = (spectrogram - self.mean) / self.scale
is_batched = spectrogram.dim() == 3
if not is_batched:
spectrogram = spectrogram.unsqueeze(0)
hidden_states = spectrogram.transpose(2, 1)
hidden_states = self.conv_pre(hidden_states)
for i in range(self.num_upsamples):
hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
hidden_states = self.upsampler[i](hidden_states)
res_state = self.resblocks[i * self.num_kernels](hidden_states)
for j in range(1, self.num_kernels):
res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
hidden_states = res_state / self.num_kernels
hidden_states = nn.functional.leaky_relu(hidden_states)
hidden_states = self.conv_post(hidden_states)
hidden_states = torch.tanh(hidden_states)
if not is_batched:
# remove batch dim and collapse tensor to 1-d audio waveform
waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
else:
# remove seq-len dim since this collapses to 1
waveform = hidden_states.squeeze(1)
return waveform
@auto_docstring(
custom_intro="""
The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).
"""
)
| FastSpeech2ConformerHifiGan |
python | ray-project__ray | python/ray/serve/tests/test_model_composition.py | {
"start": 5504,
"end": 5677
} | class ____:
def __init__(self, child):
self._child = child
async def __call__(self, *args):
return await self._child.remote()
@serve.deployment
| Parent |
python | keon__algorithms | algorithms/graph/strongly_connected_components_kosaraju.py | {
"start": 145,
"end": 1909
} | class ____:
"""
Kosaraju's algorithm use depth first search approach to find strongly connected components in a directed graph.
Approach:
1. Make a DFS call to keep track of finish time of each vertex.
2. Tranpose the original graph. ie 1->2 transpose is 1<-2
3. Make another DFS call to calculate strongly connected components.
"""
def dfs(self, i, V, adj, visited, stk):
visited[i] = 1
for x in adj[i]:
if visited[x] == -1:
self.dfs(x, V, adj, visited, stk)
stk.append(i)
def kosaraju(self, V, adj):
stk, visited = [], [-1]*(V+1)
for i in range(V):
if visited[i] == -1:
self.dfs(i, V, adj, visited, stk)
stk.reverse()
res = stk.copy()
ans, visited1 = 0, [-1]*(V+1)
adj1 = [[] for x in range(V)]
for i in range(len(adj)):
for x in adj[i]:
adj1[x].append(i)
for i in range(len(res)):
if visited1[res[i]] == -1:
ans += 1
self.dfs(res[i], V, adj1, visited1, stk)
return ans
def main():
"""
Let's look at the sample input.
6 7 #no of vertex, no of edges
0 2 #directed edge 0->2
1 0
2 3
3 1
3 4
4 5
5 4
calculating no of strongly connected compnenets in a directed graph.
answer should be: 2
1st strong component: 0->2->3->1->0
2nd strongly connected component: 4->5->4
"""
V, E = map(int, input().split())
adj = [[] for x in range(V)]
for i in range(E):
u, v = map(int, input().split())
adj[u].append(v)
print(Kosaraju().kosaraju(V, adj))
if __name__ == '__main__':
main()
| Kosaraju |
python | wandb__wandb | wandb/sdk/internal/_generated/server_features_query.py | {
"start": 335,
"end": 453
} | class ____(GQLResult):
features: List[Optional[ServerFeaturesQueryServerInfoFeatures]]
| ServerFeaturesQueryServerInfo |
python | huggingface__transformers | src/transformers/models/qwen2_vl/modeling_qwen2_vl.py | {
"start": 20750,
"end": 25183
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
"""
def __init__(self, config: Qwen2VLTextConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.rope_parameters = config.rope_parameters
self.scaling = self.head_dim**-0.5
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb(
query_states, key_states, cos, sin, self.config.rope_parameters["mrope_section"]
)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window,
position_ids=position_ids, # pass positions for FA2
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Qwen2VLAttention |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/lambda_function.py | {
"start": 1112,
"end": 9520
} | class ____(AwsBaseHook):
"""
Interact with AWS Lambda.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("lambda") <Lambda.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "lambda"
super().__init__(*args, **kwargs)
def invoke_lambda(
self,
*,
function_name: str,
invocation_type: str | None = None,
log_type: str | None = None,
client_context: str | None = None,
payload: bytes | str | None = None,
qualifier: str | None = None,
):
"""
Invoke Lambda Function.
.. seealso::
- :external+boto3:py:meth:`Lambda.Client.invoke`
:param function_name: AWS Lambda Function Name
:param invocation_type: AWS Lambda Invocation Type (RequestResponse, Event etc)
:param log_type: Set to Tail to include the execution log in the response.
Applies to synchronously invoked functions only.
:param client_context: Up to 3,583 bytes of base64-encoded data about the invoking client
to pass to the function in the context object.
:param payload: The JSON that you want to provide to your Lambda function as input.
:param qualifier: AWS Lambda Function Version or Alias Name
"""
if isinstance(payload, str):
payload = payload.encode()
invoke_args = {
"FunctionName": function_name,
"InvocationType": invocation_type,
"LogType": log_type,
"ClientContext": client_context,
"Payload": payload,
"Qualifier": qualifier,
}
return self.conn.invoke(**trim_none_values(invoke_args))
def create_lambda(
self,
*,
function_name: str,
runtime: str | None = None,
role: str,
handler: str | None = None,
code: dict,
description: str | None = None,
timeout: int | None = None,
memory_size: int | None = None,
publish: bool | None = None,
vpc_config: Any | None = None,
package_type: str | None = None,
dead_letter_config: Any | None = None,
environment: Any | None = None,
kms_key_arn: str | None = None,
tracing_config: Any | None = None,
tags: Any | None = None,
layers: list | None = None,
file_system_configs: list[Any] | None = None,
image_config: Any | None = None,
code_signing_config_arn: str | None = None,
architectures: list[str] | None = None,
ephemeral_storage: Any | None = None,
snap_start: Any | None = None,
logging_config: Any | None = None,
) -> dict:
"""
Create a Lambda function.
.. seealso::
- :external+boto3:py:meth:`Lambda.Client.create_function`
- `Configuring a Lambda function to access resources in a VPC \
<https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html>`__
:param function_name: AWS Lambda Function Name
:param runtime: The identifier of the function's runtime.
Runtime is required if the deployment package is a .zip file archive.
:param role: The Amazon Resource Name (ARN) of the function's execution role.
:param handler: The name of the method within your code that Lambda calls to run your function.
Handler is required if the deployment package is a .zip file archive.
:param code: The code for the function.
:param description: A description of the function.
:param timeout: The amount of time (in seconds) that Lambda
allows a function to run before stopping it.
:param memory_size: The amount of memory available to the function at runtime.
Increasing the function memory also increases its CPU allocation.
:param publish: Set to true to publish the first version of the function during creation.
:param vpc_config: For network connectivity to Amazon Web Services resources in a VPC,
specify a list of security groups and subnets in the VPC.
:param package_type: The type of deployment package.
Set to `Image` for container image and set to `Zip` for .zip file archive.
:param dead_letter_config: A dead-letter queue configuration that specifies the queue or topic
where Lambda sends asynchronous events when they fail processing.
:param environment: Environment variables that are accessible from function code during execution.
:param kms_key_arn: The ARN of the Key Management Service (KMS) key that's used to
encrypt your function's environment variables.
If it's not provided, Lambda uses a default service key.
:param tracing_config: Set `Mode` to `Active` to sample and trace
a subset of incoming requests with X-Ray.
:param tags: A list of tags to apply to the function.
:param layers: A list of function layers to add to the function's execution environment.
Specify each layer by its ARN, including the version.
:param file_system_configs: Connection settings for an Amazon EFS file system.
:param image_config: Container image configuration values that override
the values in the container image Dockerfile.
:param code_signing_config_arn: To enable code signing for this function,
specify the ARN of a code-signing configuration.
A code-signing configuration includes a set of signing profiles,
which define the trusted publishers for this function.
:param architectures: The instruction set architecture that the function supports.
:param ephemeral_storage: The size of the function's /tmp directory in MB.
The default value is 512, but can be any whole number between 512 and 10,240 MB
:param snap_start: The function's SnapStart setting
:param logging_config: The function's Amazon CloudWatch Logs configuration settings
"""
if package_type == "Zip":
if handler is None:
raise TypeError("Parameter 'handler' is required if 'package_type' is 'Zip'")
if runtime is None:
raise TypeError("Parameter 'runtime' is required if 'package_type' is 'Zip'")
"""Create a Lambda Function"""
create_function_args = {
"FunctionName": function_name,
"Runtime": runtime,
"Role": role,
"Handler": handler,
"Code": code,
"Description": description,
"Timeout": timeout,
"MemorySize": memory_size,
"Publish": publish,
"VpcConfig": vpc_config,
"PackageType": package_type,
"DeadLetterConfig": dead_letter_config,
"Environment": environment,
"KMSKeyArn": kms_key_arn,
"TracingConfig": tracing_config,
"Tags": tags,
"Layers": layers,
"FileSystemConfigs": file_system_configs,
"ImageConfig": image_config,
"CodeSigningConfigArn": code_signing_config_arn,
"Architectures": architectures,
"EphemeralStorage": ephemeral_storage,
"SnapStart": snap_start,
"LoggingConfig": logging_config,
}
return self.conn.create_function(**trim_none_values(create_function_args))
@staticmethod
@return_on_error(None)
def encode_log_result(log_result: str, *, keep_empty_lines: bool = True) -> list[str] | None:
"""
Encode execution log from the response and return list of log records.
Returns ``None`` on error, e.g. invalid base64-encoded string
:param log_result: base64-encoded string which contain Lambda execution Log.
:param keep_empty_lines: Whether or not keep empty lines.
"""
encoded_log_result = base64.b64decode(log_result.encode("ascii")).decode()
return [log_row for log_row in encoded_log_result.splitlines() if keep_empty_lines or log_row]
| LambdaHook |
python | milvus-io__pymilvus | pymilvus/orm/iterator.py | {
"start": 15492,
"end": 17074
} | class ____(LoopBase):
"""Since we only support nq=1 in search iteration, so search iteration response
should be different from raw response of search operation"""
def __init__(self, res: Hits, session_ts: Optional[int] = 0):
super().__init__()
self._session_ts = session_ts
self._results = []
if res is not None:
self._results.append(res)
def get_session_ts(self):
return self._session_ts
def get_res(self):
return self._results
def __len__(self):
length = 0
for res in self._results:
length += len(res)
return length
def get__item(self, idx: Any):
if len(self._results) == 0:
return None
if idx >= self.__len__():
msg = "Index out of range"
raise IndexError(msg)
index = 0
ret = None
for res in self._results:
if index + len(res) <= idx:
index += len(res)
else:
ret = res[idx - index]
break
return ret
def merge(self, others: List[Hits]):
if others is not None:
for other in others:
self._results.append(other)
def ids(self):
ids = []
for res in self._results:
for hit in res:
ids.append(hit.id)
return ids
def distances(self):
distances = []
for res in self._results:
for hit in res:
distances.append(hit.distance)
return distances
| SearchPage |
python | huggingface__transformers | tests/models/speecht5/test_tokenization_speecht5.py | {
"start": 1108,
"end": 17614
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/speecht5_asr"
tokenizer_class = SpeechT5Tokenizer
test_rust_tokenizer = False
test_sentencepiece = True
@classmethod
def setUpClass(cls):
super().setUpClass()
# We have a SentencePiece fixture for testing
tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token})
tokenizer.add_tokens(["<ctc_blank>"])
tokenizer.save_pretrained(cls.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = "this is a test"
output_text = "this is a test"
return input_text, output_text
def get_numeric_input_output_texts(self):
input_text = "I have $123.45 and owe €59.78. My balance is -₴876.90 and have 73% stocks in my company which equals to ₦72649201"
output_text = "I have one hundred and twenty three point four five dollars and owe fifty nine point seven eight euros. My balance is minus eight hundred and seventy six point nine zero ukrainian hryvnia and have seventy three percent stocks in my company which equals to seventy two million six hundred and forty nine thousand two hundred and one nigerian naira"
return input_text, output_text
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
input_text, output_text = self.get_input_output_texts(tokenizer)
ids = tokenizer.encode(output_text, add_special_tokens=False)
text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
return text, ids
def test_tokenizer_normalization(self):
tokenizer = self.get_tokenizer(normalize=True)
input_text, expected_text = self.get_numeric_input_output_texts()
input_ids = tokenizer.encode(input_text)
output_text = tokenizer.decode(input_ids, skip_special_tokens=True)
self.assertEqual(output_text, expected_text)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id_with_added_voc(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-4], "œ")
self.assertEqual(vocab_keys[-2], "<mask>")
self.assertEqual(vocab_keys[-1], "<ctc_blank>")
self.assertEqual(len(vocab_keys), 81)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 79)
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokens[-4])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip
def test_subword_regularization_tokenizer(self):
pass
def test_full_tokenizer(self):
tokenizer = self.get_tokenizer(normalize=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) # fmt: skip
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) # fmt: skip
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 9, 10, 9, 5, 6, 22, 4, 6, 20, 8, 4, 6, 11, 8, 16, 12, 7, 9, 14, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: skip
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) # fmt: skip
@slow
def test_tokenizer_integration(self):
# Use custom sequence because this tokenizer does not handle numbers.
sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="microsoft/speecht5_asr",
revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
sequences=sequences,
)
def test_encode_decode(self):
tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
tokens = tokenizer.tokenize("a = b")
self.assertEqual(tokens, ["▁", "a", "▁", "=", "▁", "b"])
# the `'='` is unknown.
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertEqual(ids, [4, 7, 4, 3, 4, 25])
# let's make sure decoding with the special unknown tokens preserves spaces
ids = tokenizer.encode("a = b")
self.assertEqual(tokenizer.decode(ids), "a <unk> b</s>")
| SpeechT5TokenizerTest |
python | pennersr__django-allauth | allauth/headless/account/views.py | {
"start": 12525,
"end": 13156
} | class ____(AuthenticatedAPIView):
input_class = ChangePasswordInput
def post(self, request, *args, **kwargs):
password_change.change_password(
self.request.user, self.input.cleaned_data["new_password"]
)
is_set = not self.input.cleaned_data.get("current_password")
if is_set:
password_change.finalize_password_set(request, request.user)
else:
password_change.finalize_password_change(request, request.user)
return AuthenticationResponse(request)
def get_input_kwargs(self):
return {"user": self.request.user}
| ChangePasswordView |
python | Netflix__metaflow | test/core/metaflow_extensions/test_org/exceptions/mfextinit_test_org.py | {
"start": 51,
"end": 263
} | class ____(MetaflowException):
headline = "Subservice error"
def __init__(self, error):
msg = "Test error: '%s'" % error
super(MetaflowTestException, self).__init__(msg)
| MetaflowTestException |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_type_lookup_future_annotations.py | {
"start": 704,
"end": 1203
} | class ____(TypedDict):
a: A
b: alias
@given(st.from_type(B))
def test_complex_forward_ref_in_typed_dict(d):
assert isinstance(d["a"], dict)
assert isinstance(d["a"]["a"], int)
assert isinstance(d["b"], (int, str))
def test_complex_forward_ref_in_typed_dict_local():
local_alias = int | str
class C(TypedDict):
a: A
b: local_alias
c_strategy = st.from_type(C)
with pytest.raises(InvalidArgument):
check_can_generate_examples(c_strategy)
| B |
python | MongoEngine__mongoengine | benchmarks/test_save_with_indexes.py | {
"start": 358,
"end": 463
} | class ____(Document):
name = StringField()
age = IntField()
meta = {"indexes": [["name"]]}
| User1 |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/commands/show.py | {
"start": 1524,
"end": 7507
} | class ____(NamedTuple):
name: str
version: str
location: str
editable_project_location: Optional[str]
requires: List[str]
required_by: List[str]
installer: str
metadata_version: str
classifiers: List[str]
summary: str
homepage: str
project_urls: List[str]
author: str
author_email: str
license: str
entry_points: List[str]
files: Optional[List[str]]
def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:
"""
Gather details from installed distributions. Print distribution name,
version, location, and installed files. Installed files requires a
pip generated 'installed-files.txt' in the distributions '.egg-info'
directory.
"""
env = get_default_environment()
installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
query_names = [canonicalize_name(name) for name in query]
missing = sorted(
[name for name, pkg in zip(query, query_names) if pkg not in installed]
)
if missing:
logger.warning("Package(s) not found: %s", ", ".join(missing))
def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
return (
dist.metadata["Name"] or "UNKNOWN"
for dist in installed.values()
if current_dist.canonical_name
in {canonicalize_name(d.name) for d in dist.iter_dependencies()}
)
for query_name in query_names:
try:
dist = installed[query_name]
except KeyError:
continue
try:
requires = sorted(
# Avoid duplicates in requirements (e.g. due to environment markers).
{req.name for req in dist.iter_dependencies()},
key=str.lower,
)
except InvalidRequirement:
requires = sorted(dist.iter_raw_dependencies(), key=str.lower)
try:
required_by = sorted(_get_requiring_packages(dist), key=str.lower)
except InvalidRequirement:
required_by = ["#N/A"]
try:
entry_points_text = dist.read_text("entry_points.txt")
entry_points = entry_points_text.splitlines(keepends=False)
except FileNotFoundError:
entry_points = []
files_iter = dist.iter_declared_entries()
if files_iter is None:
files: Optional[List[str]] = None
else:
files = sorted(files_iter)
metadata = dist.metadata
project_urls = metadata.get_all("Project-URL", [])
homepage = metadata.get("Home-page", "")
if not homepage:
# It's common that there is a "homepage" Project-URL, but Home-page
# remains unset (especially as PEP 621 doesn't surface the field).
#
# This logic was taken from PyPI's codebase.
for url in project_urls:
url_label, url = url.split(",", maxsplit=1)
normalized_label = (
url_label.casefold().replace("-", "").replace("_", "").strip()
)
if normalized_label == "homepage":
homepage = url.strip()
break
yield _PackageInfo(
name=dist.raw_name,
version=dist.raw_version,
location=dist.location or "",
editable_project_location=dist.editable_project_location,
requires=requires,
required_by=required_by,
installer=dist.installer,
metadata_version=dist.metadata_version or "",
classifiers=metadata.get_all("Classifier", []),
summary=metadata.get("Summary", ""),
homepage=homepage,
project_urls=project_urls,
author=metadata.get("Author", ""),
author_email=metadata.get("Author-email", ""),
license=metadata.get("License", ""),
entry_points=entry_points,
files=files,
)
def print_results(
distributions: Iterable[_PackageInfo],
list_files: bool,
verbose: bool,
) -> bool:
"""
Print the information from installed distributions found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
write_output("---")
write_output("Name: %s", dist.name)
write_output("Version: %s", dist.version)
write_output("Summary: %s", dist.summary)
write_output("Home-page: %s", dist.homepage)
write_output("Author: %s", dist.author)
write_output("Author-email: %s", dist.author_email)
write_output("License: %s", dist.license)
write_output("Location: %s", dist.location)
if dist.editable_project_location is not None:
write_output(
"Editable project location: %s", dist.editable_project_location
)
write_output("Requires: %s", ", ".join(dist.requires))
write_output("Required-by: %s", ", ".join(dist.required_by))
if verbose:
write_output("Metadata-Version: %s", dist.metadata_version)
write_output("Installer: %s", dist.installer)
write_output("Classifiers:")
for classifier in dist.classifiers:
write_output(" %s", classifier)
write_output("Entry-points:")
for entry in dist.entry_points:
write_output(" %s", entry.strip())
write_output("Project-URLs:")
for project_url in dist.project_urls:
write_output(" %s", project_url)
if list_files:
write_output("Files:")
if dist.files is None:
write_output("Cannot locate RECORD or installed-files.txt")
else:
for line in dist.files:
write_output(" %s", line.strip())
return results_printed
| _PackageInfo |
python | getsentry__sentry | tests/sentry/rules/history/endpoints/test_project_rule_group_history.py | {
"start": 532,
"end": 1069
} | class ____(TestCase):
def test(self) -> None:
current_date = datetime.now()
group_history = RuleGroupHistory(self.group, 50, current_date)
result = serialize([group_history], self.user, RuleGroupHistorySerializer())
assert result == [
{
"group": serialize(self.group, self.user),
"count": group_history.count,
"lastTriggered": current_date,
"eventId": None,
}
]
@freeze_time()
| RuleGroupHistorySerializerTest |
python | plotly__plotly.py | plotly/graph_objs/layout/coloraxis/colorbar/title/_font.py | {
"start": 235,
"end": 9962
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.coloraxis.colorbar.title"
_path_str = "layout.coloraxis.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.colorax
is.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.coloraxis.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.coloraxis.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | pola-rs__polars | py-polars/src/polars/_utils/async_.py | {
"start": 383,
"end": 2313
} | class ____(Generic[T]):
__slots__ = ("_result", "_value", "_watcher")
def __init__(self) -> None:
if not _GEVENT_AVAILABLE:
msg = (
"gevent is required for using LazyFrame.collect_async(gevent=True) or"
"polars.collect_all_async(gevent=True)"
)
raise ImportError(msg)
from gevent.event import AsyncResult # type: ignore[import-untyped]
from gevent.hub import get_hub # type: ignore[import-untyped]
self._value: None | Exception | PyDataFrame | list[PyDataFrame] = None
self._result = AsyncResult()
self._watcher = get_hub().loop.async_()
self._watcher.start(self._watcher_callback)
def get(
self,
block: bool = True, # noqa: FBT001
timeout: float | int | None = None,
) -> T:
return self.result.get(block=block, timeout=timeout)
@property
def result(self) -> Any:
# required if we did not made any switches and just want results later
# with block=False and possibly without timeout
if self._value is not None and not self._result.ready():
self._watcher_callback()
return self._result
def _watcher_callback(self) -> None:
if isinstance(self._value, Exception):
self._result.set_exception(self._value)
else:
self._result.set(self._value)
self._watcher.close()
def _callback(self, obj: PyDataFrame | Exception) -> None:
if not isinstance(obj, Exception):
obj = wrap_df(obj) # type: ignore[assignment]
self._value = obj
self._watcher.send()
def _callback_all(self, obj: list[PyDataFrame] | Exception) -> None:
if not isinstance(obj, Exception):
obj = [wrap_df(pydf) for pydf in obj] # type: ignore[misc]
self._value = obj
self._watcher.send()
| _GeventDataFrameResult |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query.py | {
"start": 749,
"end": 886
} | class ____(Child):
def baz(self, z):
return 0
@lru_cache(maxsize=1)
def positional_decorated(x, y) -> int:
...
| GrandChild |
python | django__django | tests/backends/models.py | {
"start": 3089,
"end": 3356
} | class ____(models.Model):
related_objects = models.ManyToManyField(
"self", db_constraint=False, symmetrical=False
)
obj_ref = models.ForeignKey("ObjectReference", models.CASCADE, null=True)
def __str__(self):
return str(self.id)
| Object |
python | python-markdown__markdown | markdown/extensions/footnotes.py | {
"start": 14138,
"end": 16601
} | class ____(Treeprocessor):
""" Amend footnote div with duplicates. """
def __init__(self, footnotes: FootnoteExtension):
self.footnotes = footnotes
def add_duplicates(self, li: etree.Element, duplicates: int) -> None:
""" Adjust current `li` and add the duplicates: `fnref2`, `fnref3`, etc. """
for link in li.iter('a'):
# Find the link that needs to be duplicated.
if link.attrib.get('class', '') == 'footnote-backref':
ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
# Duplicate link the number of times we need to
# and point the to the appropriate references.
links = []
for index in range(2, duplicates + 1):
sib_link = copy.deepcopy(link)
sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
links.append(sib_link)
self.offset += 1
# Add all the new duplicate links.
el = list(li)[-1]
for link in links:
el.append(link)
break
def get_num_duplicates(self, li: etree.Element) -> int:
""" Get the number of duplicate refs of the footnote. """
fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
return self.footnotes.found_refs.get(link_id, 0)
def handle_duplicates(self, parent: etree.Element) -> None:
""" Find duplicate footnotes and format and add the duplicates. """
for li in list(parent):
# Check number of duplicates footnotes and insert
# additional links if needed.
count = self.get_num_duplicates(li)
if count > 1:
self.add_duplicates(li, count)
def run(self, root: etree.Element) -> None:
""" Crawl the footnote div and add missing duplicate footnotes. """
self.offset = 0
for div in root.iter('div'):
if div.attrib.get('class', '') == 'footnote':
# Footnotes should be under the first ordered list under
# the footnote div. So once we find it, quit.
for ol in div.iter('ol'):
self.handle_duplicates(ol)
break
| FootnotePostTreeprocessor |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 155964,
"end": 174050
} | class ____:
@staticmethod
def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7):
xp = array_namespace(r, p)
r_true = xp.asarray(r_true)
p_true = xp.asarray(p_true)
distance = xp.hypot(abs(p[:, None] - p_true),
abs(r[:, None] - r_true))
rows, cols = linear_sum_assignment(_xp_copy_to_numpy(distance))
assert_almost_equal(p[rows], p_true[cols], decimal=decimal)
assert_almost_equal(r[rows], r_true[cols], decimal=decimal)
@skip_xp_backends(np_only=True)
def test_compute_factors(self, xp):
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1])
assert len(factors) == 3
assert_almost_equal(factors[0], np.poly([2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1],
include_powers=True)
assert len(factors) == 6
assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 2, 2, 3]))
assert_almost_equal(factors[2], np.poly([2, 2, 3]))
assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3]))
assert_almost_equal(factors[4], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
@skip_xp_backends(np_only=True)
def test_group_poles(self, xp):
unique, multiplicity = _group_poles(
[1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min')
xp_assert_close(unique, [1.0, 2.0, 3.0])
xp_assert_close(multiplicity, [3, 2, 1])
@make_xp_test_case(residue)
def test_residue_general(self, xp):
# Test are taken from issue #4464, note that poles in scipy are
# in increasing by absolute value order, opposite to MATLAB.
r, p, k = residue(xp.asarray([5, 3, -2, 7]), xp.asarray([-4, 0, 8, 3]))
assert_almost_equal(r, xp.asarray([1.3320, -0.6653, -1.4167]), decimal=4)
assert_almost_equal(p, xp.asarray([-0.4093, -1.1644, 1.5737]), decimal=4)
assert_almost_equal(k, xp.asarray([-1.2500]), decimal=4)
r, p, k = residue(xp.asarray([-4, 8]), xp.asarray([1, 6, 8]))
assert_almost_equal(r, xp.asarray([8, -12]))
assert_almost_equal(p, xp.asarray([-2, -4]))
assert k.size == 0
r, p, k = residue(xp.asarray([4, 1]), xp.asarray([1, -1, -2]))
assert_almost_equal(r, xp.asarray([1, 3]))
assert_almost_equal(p, xp.asarray([-1, 2]))
assert k.size == 0
r, p, k = residue(xp.asarray([4, 3]),
xp.asarray([2, -3.4, 1.98, -0.406]))
self.assert_rp_almost_equal(
r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25],
[0.5 - 0.2j, 0.5 + 0.2j, 0.7])
assert k.size == 0
r, p, k = residue(xp.asarray([2, 1]), xp.asarray([1, 5, 8, 4]))
self.assert_rp_almost_equal(r, p, [-1, 1, 3],
[-1, -2, -2])
assert k.size == 0
r, p, k = residue(xp.asarray([3, -1.1, 0.88, -2.396, 1.348]),
xp.asarray([1, -0.7, -0.14, 0.048]))
assert_almost_equal(r, xp.asarray([-3, 4, 1]))
assert_almost_equal(p, xp.asarray([0.2, -0.3, 0.8]))
assert_almost_equal(k, xp.asarray([3, 1]))
r, p, k = residue(xp.asarray([1]), xp.asarray([1, 2, -3]))
assert_almost_equal(r, xp.asarray([0.25, -0.25]))
assert_almost_equal(p, xp.asarray([1, -3]))
assert k.size == 0
r, p, k = residue(xp.asarray([1, 0, -5]), xp.asarray([1, 0, 0, 0, -1]))
self.assert_rp_almost_equal(r, p,
[1, 1.5j, -1.5j, -1],
[-1, -1j, 1j, 1])
assert k.size == 0
r, p, k = residue(xp.asarray([3, 8, 6]), xp.asarray([1, 3, 3, 1]))
self.assert_rp_almost_equal(r, p, [1, 2, 3],
[-1, -1, -1])
assert k.size == 0
r, p, k = residue(xp.asarray([3, -1]), xp.asarray([1, -3, 2]))
assert_almost_equal(r, xp.asarray([-2, 5]))
assert_almost_equal(p, xp.asarray([1, 2]))
assert k.size == 0
r, p, k = residue(xp.asarray([2, 3, -1]), xp.asarray([1, -3, 2]))
assert_almost_equal(r, xp.asarray([-4, 13]))
assert_almost_equal(p, xp.asarray([1, 2]))
assert_almost_equal(k, xp.asarray([2]))
r, p, k = residue(xp.asarray([7, 2, 3, -1]), xp.asarray([1, -3, 2]))
assert_almost_equal(r, xp.asarray([-11, 69]))
assert_almost_equal(p, xp.asarray([1, 2]))
assert_almost_equal(k, xp.asarray([7, 23]))
r, p, k = residue(xp.asarray([2, 3, -1]), xp.asarray([1, -3, 4, -2]))
self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j],
[1, 1 - 1j, 1 + 1j])
assert k.size == 0
@make_xp_test_case(residue)
def test_residue_leading_zeros(self, xp):
# Leading zeros in numerator or denominator must not affect the answer.
r0, p0, k0 = residue(xp.asarray([5, 3, -2, 7]), xp.asarray([-4, 0, 8, 3]))
r1, p1, k1 = residue(xp.asarray([0, 5, 3, -2, 7]), xp.asarray([-4, 0, 8, 3]))
r2, p2, k2 = residue(xp.asarray([5, 3, -2, 7]), xp.asarray([0, -4, 0, 8, 3]))
r3, p3, k3 = residue(xp.asarray([0, 0, 5, 3, -2, 7]),
xp.asarray([0, 0, 0, -4, 0, 8, 3]))
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
@make_xp_test_case(residue)
def test_residue_degenerate(self, xp):
# Several tests for zero numerator and denominator.
r, p, k = residue(xp.asarray([0, 0]), xp.asarray([1, 6, 8]))
assert_almost_equal(r, xp.asarray([0, 0]))
assert_almost_equal(p, xp.asarray([-2, -4]))
assert k.size == 0
r, p, k = residue(xp.asarray(0), xp.asarray(1))
assert r.size == 0
assert p.size == 0
assert k.size == 0
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residue(1, 0)
@make_xp_test_case(residuez)
def test_residuez_general(self, xp):
r, p, k = residuez(xp.asarray([1, 6, 6, 2]),
xp.asarray([1, -(2 + 1j), (1 + 2j), -1j]))
self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j],
[1j, 1, 1])
assert_almost_equal(k, xp.asarray([2j]))
r, p, k = residuez(xp.asarray([1, 2, 1]), xp.asarray([1, -1, 0.3561]))
self.assert_rp_almost_equal(r, p,
[-0.9041 - 5.9928j, -0.9041 + 5.9928j],
[0.5 + 0.3257j, 0.5 - 0.3257j],
decimal=4)
assert_almost_equal(k, xp.asarray([2.8082]), decimal=4)
r, p, k = residuez(xp.asarray([1, -1]), xp.asarray([1, -5, 6]))
assert_almost_equal(r, xp.asarray([-1, 2]))
assert_almost_equal(p, xp.asarray([2, 3]))
assert k.size == 0
r, p, k = residuez(xp.asarray([2, 3, 4]), xp.asarray([1, 3, 3, 1]))
self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1])
assert k.size == 0
r, p, k = residuez(xp.asarray([1, -10, -4, 4]), xp.asarray([2, -2, -4]))
assert_almost_equal(r, xp.asarray([0.5, -1.5]))
assert_almost_equal(p, xp.asarray([-1, 2]))
assert_almost_equal(k, xp.asarray([1.5, -1]))
r, p, k = residuez(xp.asarray([18]), xp.asarray([18, 3, -4, -1]))
self.assert_rp_almost_equal(r, p,
[0.36, 0.24, 0.4], [0.5, -1/3, -1/3])
assert k.size == 0
r, p, k = residuez(xp.asarray([2, 3]),
xp.asarray(np.polymul([1, -1/2], [1, 1/4])))
assert_almost_equal(r, xp.asarray([-10/3, 16/3]))
assert_almost_equal(p, xp.asarray([-0.25, 0.5]))
assert k.size == 0
r, p, k = residuez(xp.asarray([1, -2, 1]), xp.asarray([1, -1]))
assert_almost_equal(r, xp.asarray([0]))
assert_almost_equal(p, xp.asarray([1]))
assert_almost_equal(k, xp.asarray([1, -1]))
r, p, k = residuez(xp.asarray(1), xp.asarray([1, -1j]))
assert_almost_equal(r, xp.asarray([1]))
assert_almost_equal(p, xp.asarray([1j]))
assert k.size == 0
r, p, k = residuez(xp.asarray(1), xp.asarray([1, -1, 0.25]))
assert_almost_equal(r, xp.asarray([0, 1]))
assert_almost_equal(p, xp.asarray([0.5, 0.5]))
assert k.size == 0
r, p, k = residuez(xp.asarray(1), xp.asarray([1, -0.75, .125]))
assert_almost_equal(r, xp.asarray([-1, 2]))
assert_almost_equal(p, xp.asarray([0.25, 0.5]))
assert k.size == 0
r, p, k = residuez(xp.asarray([1, 6, 2]), xp.asarray([1, -2, 1]))
assert_almost_equal(r, xp.asarray([-10, 9]))
assert_almost_equal(p, xp.asarray([1, 1]))
assert_almost_equal(k, xp.asarray([2]))
r, p, k = residuez(xp.asarray([6, 2]), xp.asarray([1, -2, 1]))
assert_almost_equal(r, xp.asarray([-2, 8]))
assert_almost_equal(p, xp.asarray([1, 1]))
assert k.size == 0
r, p, k = residuez(xp.asarray([1, 6, 6, 2]), xp.asarray([1, -2, 1]))
assert_almost_equal(r, xp.asarray([-24, 15]))
assert_almost_equal(p, xp.asarray([1, 1]))
assert_almost_equal(k, xp.asarray([10, 2]))
r, p, k = residuez(xp.asarray([1, 0, 1]), xp.asarray([1, 0, 0, 0, 0, -1]))
self.assert_rp_almost_equal(r, p,
[0.2618 + 0.1902j, 0.2618 - 0.1902j,
0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j],
[-0.8090 + 0.5878j, -0.8090 - 0.5878j,
1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j],
decimal=4)
assert k.size == 0
@make_xp_test_case(residuez)
def test_residuez_trailing_zeros(self, xp):
# Trailing zeros in numerator or denominator must not affect the
# answer.
r0, p0, k0 = residuez(xp.asarray([5, 3, -2, 7]),
xp.asarray([-4, 0, 8, 3]))
r1, p1, k1 = residuez(xp.asarray([5, 3, -2, 7, 0]),
xp.asarray([-4, 0, 8, 3]))
r2, p2, k2 = residuez(xp.asarray([5, 3, -2, 7]),
xp.asarray([-4, 0, 8, 3, 0]))
r3, p3, k3 = residuez(xp.asarray([5, 3, -2, 7, 0, 0]),
xp.asarray([-4, 0, 8, 3, 0, 0, 0]))
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
@make_xp_test_case(residuez)
def test_residuez_degenerate(self, xp):
r, p, k = residuez(xp.asarray([0, 0]), xp.asarray([1, 6, 8]))
assert_almost_equal(r, xp.asarray([0, 0]))
assert_almost_equal(p, xp.asarray([-2, -4]))
assert k.size == 0
r, p, k = residuez(xp.asarray(0), xp.asarray(1))
assert r.size == 0
assert p.size == 0
assert k.size == 0
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residuez(xp.asarray(1), xp.asarray(0))
with pytest.raises(ValueError,
match="First coefficient of determinant `a` must "
"be non-zero."):
residuez(xp.asarray(1), xp.asarray([0, 1, 2, 3]))
@make_xp_test_case(invres, invresz)
def test_inverse_unique_roots_different_rtypes(self, xp):
# This test was inspired by github issue 2496.
r = xp.asarray([3 / 10, -1 / 6, -2 / 15])
p = xp.asarray([0, -2, -5])
k = xp.asarray([])
b_expected = xp.asarray([0.0, 1, 3])
a_expected = xp.asarray([1, 7, 10, 0])
# With the default tolerance, the rtype does not matter
# for this example.
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
xp_assert_close(b, b_expected, atol=5e-16)
xp_assert_close(a, a_expected, check_dtype=False, atol=5e-16)
b, a = invresz(r, p, k, rtype=rtype)
xp_assert_close(b, b_expected, atol=5e-16)
xp_assert_close(a, a_expected, check_dtype=False, atol=5e-16)
@make_xp_test_case(invres, invresz)
def test_inverse_repeated_roots_different_rtypes(self, xp):
r = xp.asarray([3 / 20, -7 / 36, -1 / 6, 2 / 45])
p = xp.asarray([0, -2, -2, -5])
k = xp.asarray([])
b_expected = xp.asarray([0.0, 0, 1, 3])
b_expected_z = xp.asarray([-1/6, -2/3, 11/6, 3])
a_expected = xp.asarray([1, 9, 24, 20, 0])
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
xp_assert_close(b, b_expected, atol=1e-14)
xp_assert_close(a, a_expected, check_dtype=False)
b, a = invresz(r, p, k, rtype=rtype)
xp_assert_close(b, b_expected_z, atol=1e-14)
xp_assert_close(a, a_expected, check_dtype=False)
@make_xp_test_case(invres, invresz)
def test_inverse_bad_rtype(self, xp):
r = xp.asarray([3 / 20, -7 / 36, -1 / 6, 2 / 45])
p = xp.asarray([0, -2, -2, -5])
k = xp.asarray([])
with pytest.raises(ValueError, match="`rtype` must be one of"):
invres(r, p, k, rtype='median')
with pytest.raises(ValueError, match="`rtype` must be one of"):
invresz(r, p, k, rtype='median')
@make_xp_test_case(invresz)
def test_invresz_one_coefficient_bug(self, xp):
# Regression test for issue in gh-4646.
r = xp.asarray([1])
p = xp.asarray([2])
k = xp.asarray([0])
b, a = invresz(r, p, k)
xp_assert_close(b, xp.asarray([1]))
xp_assert_close(a, xp.asarray([1.0, -2.0]))
@make_xp_test_case(invres)
def test_invres(self, xp):
b, a = invres(xp.asarray([1]), xp.asarray([1]), xp.asarray([]))
assert_almost_equal(b, xp.asarray([1]))
assert_almost_equal(a, xp.asarray([1, -1]))
b, a = invres(xp.asarray([1 - 1j, 2, 0.5 - 3j]),
xp.asarray([1, 0.5j, 1 + 1j]), xp.asarray([]))
assert_almost_equal(b, xp.asarray([3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]))
assert_almost_equal(a, xp.asarray([1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]))
b, a = invres(xp.asarray([0.5, 1]), xp.asarray([1 - 1j, 2 + 2j]),
xp.asarray([1, 2, 3]))
assert_almost_equal(b, xp.asarray([1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10]))
assert_almost_equal(a, xp.asarray([1, -3 - 1j, 4]))
b, a = invres(xp.asarray([-1, 2, 1j, 3 - 1j, 4, -2]),
xp.asarray([-1, 2 - 1j, 2 - 1j, 3, 3, 3]), xp.asarray([]))
assert_almost_equal(b,
xp.asarray([4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j,
-292 + 219j, 192 - 268j]))
assert_almost_equal(a,
xp.asarray([1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j]))
b, a = invres(xp.asarray([-1, 1j]), xp.asarray([1, 1]), xp.asarray([1, 2]))
assert_almost_equal(b, xp.asarray([1, 0, -4, 3 + 1j]))
assert_almost_equal(a, xp.asarray([1, -2, 1]))
@make_xp_test_case(invresz)
def test_invresz(self, xp):
b, a = invresz(xp.asarray([1]), xp.asarray([1]), xp.asarray([]))
assert_almost_equal(b, xp.asarray([1]))
assert_almost_equal(a, xp.asarray([1, -1]))
b, a = invresz(xp.asarray([1 - 1j, 2, 0.5 - 3j]),
xp.asarray([1, 0.5j, 1 + 1j]), xp.asarray([]))
assert_almost_equal(b, xp.asarray([3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]))
assert_almost_equal(a, xp.asarray([1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]))
b, a = invresz(xp.asarray([0.5, 1]),
xp.asarray([1 - 1j, 2 + 2j]),
xp.asarray([1, 2, 3]))
assert_almost_equal(b, xp.asarray([2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12]))
assert_almost_equal(a, xp.asarray([1, -3 - 1j, 4]))
b, a = invresz(xp.asarray([-1, 2, 1j, 3 - 1j, 4, -2]),
xp.asarray([-1, 2 - 1j, 2 - 1j, 3, 3, 3]),
xp.asarray([]))
assert_almost_equal(b,
xp.asarray([6, -50 + 11j, 100 - 72j, 80 + 58j,
-354 + 228j, 234 - 297j]))
assert_almost_equal(a,
xp.asarray([1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j]))
b, a = invresz(xp.asarray([-1, 1j]),
xp.asarray([1, 1]),
xp.asarray([1, 2]))
assert_almost_equal(b, xp.asarray([1j, 1, -3, 2]))
assert_almost_equal(a, xp.asarray([1, -2, 1]))
@skip_xp_backends(np_only=True)
@make_xp_test_case(invres, invresz)
def test_inverse_scalar_arguments(self, xp):
b, a = invres(1, 1, 1)
assert_almost_equal(b, [1, 0])
assert_almost_equal(a, [1, -1])
b, a = invresz(1, 1, 1)
assert_almost_equal(b, [2, -1])
assert_almost_equal(a, [1, -1])
@make_xp_test_case(vectorstrength)
| TestPartialFractionExpansion |
python | pytorch__pytorch | test/dynamo/test_fake_distributed.py | {
"start": 1776,
"end": 2482
} | class ____(torch.nn.Module):
def forward(self, primals_1: "Sym(s77)", primals_2: "Sym(s27)", primals_3: "f32[s77, s27]"):
floordiv: "Sym((s77//2))" = primals_1 // 2
all_to_all_single: "f32[2*((s77//2)), s27]" = torch.ops._c10d_functional.all_to_all_single.default(primals_3, [floordiv, floordiv], [floordiv, floordiv], '0'); primals_3 = None
wait_tensor: "f32[2*((s77//2)), s27]" = torch.ops._c10d_functional.wait_tensor.default(all_to_all_single); all_to_all_single = None
return (wait_tensor, primals_1, primals_2, floordiv)
""", # noqa: B950
)
self.assertExpectedInline(
normalize_graph(backend.bw_graphs[0]),
"""\
| GraphModule |
python | django__django | django/contrib/gis/gdal/field.py | {
"start": 4735,
"end": 4770
} | class ____(Field):
pass
| OFTString |
python | ethereum__web3.py | web3/_utils/module_testing/web3_module.py | {
"start": 747,
"end": 18176
} | class ____:
def test_web3_client_version(self, w3: Web3) -> None:
client_version = w3.client_version
self._check_web3_client_version(client_version)
def _check_web3_client_version(self, client_version: str) -> NoReturn:
raise NotImplementedError("Must be implemented by subclasses")
# Contract that calculated test values can be found at
# https://kovan.etherscan.io/address/0xb9be06f5b99372cf9afbccadbbb9954ccaf7f4bb#code
@pytest.mark.parametrize(
"types,values,expected",
(
(
["bool"],
[True],
HexBytes(
"0x5fe7f977e71dba2ea1a68e21057beebb9be2ac30c6410aa38d4f3fbe41dcffd2"
),
),
(
["uint8", "uint8", "uint8"],
[97, 98, 99],
HexBytes(
"0x4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45"
),
),
(
["uint248"],
[30],
HexBytes(
"0x30f95d210785601eb33ae4d53d405b26f920e765dff87cca8e9a4aec99f82671"
),
),
(
["bool", "uint16"],
[True, 299],
HexBytes(
"0xed18599ccd80ee9fae9a28b0e34a5573c3233d7468f808fd659bc171cf0b43bd"
),
),
(
["int256"],
[-10],
HexBytes(
"0xd6fb717f7e270a360f5093ce6a7a3752183e89c9a9afe5c0cb54b458a304d3d5"
),
),
(
["int256"],
[10],
HexBytes(
"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8"
),
),
(
["int8", "uint8"],
[-10, 18],
HexBytes(
"0x5c6ab1e634c08d9c0f4df4d789e8727943ef010dd7ca8e3c89de197a26d148be"
),
),
(
["address"],
["0x49eddd3769c0712032808d86597b84ac5c2f5614"],
InvalidAddress,
),
(
["address"],
["0x49EdDD3769c0712032808D86597B84ac5c2F5614"],
HexBytes(
"0x2ff37b5607484cd4eecf6d13292e22bd6e5401eaffcc07e279583bc742c68882"
),
),
(
["bytes2"],
["0x5402"],
HexBytes(
"0x4ed9171bda52fca71ab28e7f452bd6eacc3e5a568a47e0fa53b503159a9b8910"
),
),
(
["bytes3"],
["0x5402"],
HexBytes(
"0x4ed9171bda52fca71ab28e7f452bd6eacc3e5a568a47e0fa53b503159a9b8910"
),
),
(
["bytes"],
[
"0x636865636b6c6f6e6762797465737472696e676167"
"61696e7374736f6c6964697479736861336861736866756e6374696f6e"
],
HexBytes(
"0xd78a84d65721b67e4011b10c99dafdedcdcd7cb30153064f773e210b4762e22f"
),
),
(
["string"],
["testing a string!"],
HexBytes(
"0xe8c275c0b4070a5ec6cfcb83f0ba394b30ddd283de785d43f2eabfb04bd96747"
),
),
(
["string", "bool", "uint16", "bytes2", "address"],
[
"testing a string!",
False,
299,
"0x5402",
"0x49eddd3769c0712032808d86597b84ac5c2f5614",
],
InvalidAddress,
),
(
["string", "bool", "uint16", "bytes2", "address"],
[
"testing a string!",
False,
299,
"0x5402",
"0x49EdDD3769c0712032808D86597B84ac5c2F5614",
],
HexBytes(
"0x8cc6eabb25b842715e8ca39e2524ed946759aa37bfb7d4b81829cf5a7e266103"
),
),
(
["bool[2][]"],
[[[True, False], [False, True]]],
HexBytes(
"0x1eef261f2eb51a8c736d52be3f91ff79e78a9ec5df2b7f50d0c6f98ed1e2bc06"
),
),
(
["bool[]"],
[[True, False, True]],
HexBytes(
"0x5c6090c0461491a2941743bda5c3658bf1ea53bbd3edcde54e16205e18b45792"
),
),
(
["uint24[]"],
[[1, 0, 1]],
HexBytes(
"0x5c6090c0461491a2941743bda5c3658bf1ea53bbd3edcde54e16205e18b45792"
),
),
(
["uint8[2]"],
[[8, 9]],
HexBytes(
"0xc7694af312c4f286114180fd0ba6a52461fcee8a381636770b19a343af92538a"
),
),
(
["uint256[2]"],
[[8, 9]],
HexBytes(
"0xc7694af312c4f286114180fd0ba6a52461fcee8a381636770b19a343af92538a"
),
),
(
["uint8[]"],
[[8]],
HexBytes(
"0xf3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee3"
),
),
(
["address[]"],
[
[
"0x49EdDD3769c0712032808D86597B84ac5c2F5614",
"0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5",
]
],
HexBytes(
"0xb98565c0c26a962fd54d93b0ed6fb9296e03e9da29d2281ed3e3473109ef7dde"
),
),
(
["address[]"],
[
[
"0x49EdDD3769c0712032808D86597B84ac5c2F5614",
"0xa6b759bbbf4b59d24acf7e06e79f3a5d104fdce5",
]
],
InvalidAddress,
),
),
)
def test_solidity_keccak(
self,
w3: "Web3",
types: Sequence[TypeStr],
values: Sequence[Any],
expected: HexBytes,
) -> None:
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
w3.solidity_keccak(types, values)
return
actual = w3.solidity_keccak(types, values)
assert actual == expected
@pytest.mark.parametrize(
"types, values, expected",
(
(
["address"],
["one.eth"],
HexBytes(
"0x2ff37b5607484cd4eecf6d13292e22bd6e5401eaffcc07e279583bc742c68882"
),
),
(
["address[]"],
[["one.eth", "two.eth"]],
HexBytes(
"0xb98565c0c26a962fd54d93b0ed6fb9296e03e9da29d2281ed3e3473109ef7dde"
),
),
),
)
def test_solidity_keccak_ens(
self,
w3: "Web3",
types: Sequence[TypeStr],
values: Sequence[str],
expected: HexBytes,
) -> None:
with ens_addresses(
w3,
{
"one.eth": ChecksumAddress(
HexAddress(HexStr("0x49EdDD3769c0712032808D86597B84ac5c2F5614"))
),
"two.eth": ChecksumAddress(
HexAddress(HexStr("0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5"))
),
},
):
# when called as class method, any name lookup attempt will fail
with pytest.raises(InvalidAddress):
Web3.solidity_keccak(types, values)
# when called as instance method, ens lookups can succeed
actual = w3.solidity_keccak(types, values)
assert actual == expected
@pytest.mark.parametrize(
"types,values",
(
(["address"], ["0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5", True]),
(["address", "bool"], ["0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5"]),
([], ["0xA6b759bBbf4B59D24acf7E06e79f3a5D104fdCE5"]),
),
)
def test_solidity_keccak_same_number_of_types_and_values(
self, w3: "Web3", types: Sequence[TypeStr], values: Sequence[Any]
) -> None:
with pytest.raises(ValueError):
w3.solidity_keccak(types, values)
def test_is_connected(self, w3: "Web3") -> None:
assert w3.is_connected()
def test_batch_requests(self, w3: "Web3", math_contract: Contract) -> None:
with w3.batch_requests() as batch:
batch.add(w3.eth.get_block(6))
batch.add(w3.eth.get_block(4))
batch.add(w3.eth.get_block(2))
batch.add(w3.eth.get_block(0))
batch.add(math_contract.functions.multiply7(0))
batch.add_mapping(
{
math_contract.functions.multiply7: [1, 2, 3],
w3.eth.get_block: [1, 3, 5],
}
)
assert len(batch._requests_info) == 11
responses = batch.execute()
assert len(responses) == 11
# assert proper batch cleanup after execution
assert batch._requests_info == []
assert not w3.provider._is_batching
# assert batch cannot be added to after execution
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.add(w3.eth.get_block(5))
# assert batch cannot be executed again
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.execute()
# assert can make a request after executing
block_num = w3.eth.block_number
assert isinstance(block_num, int)
first_four_responses: Sequence[BlockData] = cast(
Sequence[BlockData], responses[:4]
)
assert first_four_responses[0]["number"] == 6
assert first_four_responses[1]["number"] == 4
assert first_four_responses[2]["number"] == 2
assert first_four_responses[3]["number"] == 0
responses_five_through_eight: Sequence[int] = cast(
Sequence[int], responses[4:8]
)
assert responses_five_through_eight[0] == 0
assert responses_five_through_eight[1] == 7
assert responses_five_through_eight[2] == 14
assert responses_five_through_eight[3] == 21
last_three_responses: Sequence[BlockData] = cast(
Sequence[BlockData], responses[8:]
)
assert last_three_responses[0]["number"] == 1
assert last_three_responses[1]["number"] == 3
assert last_three_responses[2]["number"] == 5
def test_batch_requests_initialized_as_object(
self, w3: "Web3", math_contract: Contract
) -> None:
batch = w3.batch_requests()
batch.add(w3.eth.get_block(1))
batch.add(w3.eth.get_block(2))
batch.add(math_contract.functions.multiply7(0))
batch.add_mapping(
{math_contract.functions.multiply7: [1, 2], w3.eth.get_block: [3, 4]}
)
assert len(batch._requests_info) == 7
b1, b2, m0, m1, m2, b3, b4 = batch.execute()
# assert proper batch cleanup after execution
assert batch._requests_info == []
assert not w3.provider._is_batching
# assert batch cannot be added to after execution
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.add(w3.eth.get_block(5))
# assert batch cannot be executed again
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.execute()
# assert can make a request after executing
block_num = w3.eth.block_number
assert isinstance(block_num, int)
assert cast(BlockData, b1)["number"] == 1
assert cast(BlockData, b2)["number"] == 2
assert cast(int, m0) == 0
assert cast(int, m1) == 7
assert cast(int, m2) == 14
assert cast(BlockData, b3)["number"] == 3
assert cast(BlockData, b4)["number"] == 4
def test_batch_requests_clear(self, w3: "Web3") -> None:
with w3.batch_requests() as batch:
batch.add(w3.eth.get_block(1))
batch.add(w3.eth.get_block(2))
assert len(batch._requests_info) == 2
batch.clear()
assert batch._requests_info == []
batch.add(w3.eth.get_block(3))
batch.add(w3.eth.get_block(4))
r1, r2 = batch.execute()
assert cast(BlockData, r1)["number"] == 3
assert cast(BlockData, r2)["number"] == 4
new_batch = w3.batch_requests()
new_batch.add(w3.eth.get_block(5))
assert len(new_batch._requests_info) == 1
new_batch.clear()
assert new_batch._requests_info == []
new_batch.add(w3.eth.get_block(6))
(r3,) = new_batch.execute()
assert cast(BlockData, r3)["number"] == 6
def test_batch_requests_cancel(self, w3: "Web3") -> None:
# as context manager
with w3.batch_requests() as batch:
batch.add(w3.eth.get_block(1))
batch.cancel()
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.add(w3.eth.get_block(2))
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
batch.execute()
# can make a request after cancelling
block_num = w3.eth.block_number
assert isinstance(block_num, int)
# as obj
new_batch = w3.batch_requests()
new_batch.add(w3.eth.get_block(1))
new_batch.cancel()
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
new_batch.add(w3.eth.get_block(2))
with pytest.raises(
Web3ValueError,
match="Batch has already been executed or cancelled",
):
new_batch.execute()
# assert can make a request after cancelling
block_num = w3.eth.block_number
assert isinstance(block_num, int)
def test_batch_requests_raises_for_common_unsupported_methods(
self, w3: "Web3", math_contract: Contract
) -> None:
with w3.batch_requests() as batch:
with pytest.raises(MethodNotSupported, match="eth_sendTransaction"):
batch.add(w3.eth.send_transaction({}))
batch.execute()
with w3.batch_requests() as batch:
with pytest.raises(MethodNotSupported, match="eth_sendTransaction"):
batch.add(math_contract.functions.multiply7(1).transact({}))
batch.execute()
with w3.batch_requests() as batch:
with pytest.raises(MethodNotSupported, match="eth_sendRawTransaction"):
batch.add(w3.eth.send_raw_transaction(b""))
batch.execute()
with w3.batch_requests() as batch:
with pytest.raises(MethodNotSupported, match="eth_sign"):
batch.add(w3.eth.sign(Address(b"\x00" * 20)))
batch.execute()
def test_batch_requests_concurrently_with_regular_requests(
self, w3: "Web3"
) -> None:
num_requests = 40
responses = []
batch_response = []
def make_regular_requests() -> None:
for _ in range(num_requests):
responses.append(w3.eth.get_block(0))
time.sleep(0.01)
def make_batch_request() -> None:
with w3.batch_requests() as batch:
for _ in range(num_requests):
batch.add(w3.eth.get_block(0))
time.sleep(0.01)
batch_response.extend(batch.execute())
# split into threads
regular_thread = threading.Thread(target=make_regular_requests)
batch_thread = threading.Thread(target=make_batch_request)
regular_thread.start()
batch_thread.start()
# wait for threads to finish
regular_thread.join()
batch_thread.join()
assert not regular_thread.is_alive()
assert not batch_thread.is_alive()
assert len(responses) == num_requests
assert len(batch_response) == num_requests
assert all(SOME_BLOCK_KEYS.issubset(response.keys()) for response in responses)
assert set(responses) == set(batch_response)
# -- async -- #
| Web3ModuleTest |
python | walkccc__LeetCode | solutions/3295. Report Spam Message/3295.py | {
"start": 0,
"end": 193
} | class ____:
def reportSpam(self, message: list[str], bannedWords: list[str]) -> bool:
bannedWordsSet = set(bannedWords)
return sum(word in bannedWordsSet for word in message) > 1
| Solution |
python | fastai__fastai | fastai/torch_core.py | {
"start": 23304,
"end": 23540
} | class ____:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
| ShowTitle |
python | joke2k__faker | faker/providers/date_time/id_ID/__init__.py | {
"start": 46,
"end": 861
} | class ____(DateTimeProvider):
def day_of_week(self) -> str:
day = self.date("%w")
DAY_NAMES = {
"0": "Senin",
"1": "Selasa",
"2": "Rabu",
"3": "Kamis",
"4": "Jumat",
"5": "Sabtu",
"6": "Minggu",
}
return DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
MONTH_NAMES = {
"01": "Januari",
"02": "Februari",
"03": "Maret",
"04": "April",
"05": "Mei",
"06": "Juni",
"07": "Juli",
"08": "Agustus",
"09": "September",
"10": "Oktober",
"11": "November",
"12": "Desember",
}
return MONTH_NAMES[month]
| Provider |
python | django__django | tests/gis_tests/gis_migrations/test_operations.py | {
"start": 15587,
"end": 16269
} | class ____(OperationTestCase):
def test_create_raster_model_on_db_without_raster_support(self):
msg = "Raster fields require backends with raster support."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model(force_raster_creation=True)
def test_add_raster_field_on_db_without_raster_support(self):
msg = "Raster fields require backends with raster support."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model()
self.alter_gis_model(
migrations.AddField, "Neighborhood", "heatmap", fields.RasterField
)
| NoRasterSupportTests |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail_baseConfig.py | {
"start": 1578,
"end": 1651
} | class ____(Model):
class Config:
frozen = False
| InheritingModel |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 169498,
"end": 171045
} | class ____(QueryTest, AssertsCompiledSQL):
def test_one_prefix(self):
User = self.classes.User
sess = fixture_session()
query = sess.query(User.name).prefix_with("PREFIX_1")
expected = "SELECT PREFIX_1 users.name AS users_name FROM users"
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_one_suffix(self):
User = self.classes.User
sess = fixture_session()
query = sess.query(User.name).suffix_with("SUFFIX_1")
# trailing space for some reason
expected = "SELECT users.name AS users_name FROM users SUFFIX_1 "
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_many_prefixes(self):
User = self.classes.User
sess = fixture_session()
query = sess.query(User.name).prefix_with("PREFIX_1", "PREFIX_2")
expected = (
"SELECT PREFIX_1 PREFIX_2 users.name AS users_name FROM users"
)
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_chained_prefixes(self):
User = self.classes.User
sess = fixture_session()
query = (
sess.query(User.name)
.prefix_with("PREFIX_1")
.prefix_with("PREFIX_2", "PREFIX_3")
)
expected = (
"SELECT PREFIX_1 PREFIX_2 PREFIX_3 "
"users.name AS users_name FROM users"
)
self.assert_compile(query, expected, dialect=default.DefaultDialect())
| PrefixSuffixWithTest |
python | viewflow__viewflow | viewflow/contrib/auth.py | {
"start": 1073,
"end": 1552
} | class ____(auth_forms.AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["username"].widget = forms.TextInput(
attrs={"autofocus": True, "leading-icon": "account_box"},
)
self.fields["password"].widget = forms.PasswordInput(
attrs={"leading-icon": "lock"}
)
@method_decorator(user_passes_test(lambda u: u.is_authenticated), name="dispatch")
| AuthenticationForm |
python | EpistasisLab__tpot | tpot/search_spaces/pipelines/tree.py | {
"start": 2303,
"end": 3339
} | class ____(SearchSpace):
def __init__(self, root_search_space : SearchSpace,
leaf_search_space : SearchSpace = None,
inner_search_space : SearchSpace =None,
min_size: int = 2,
max_size: int = 10,
crossover_same_depth=False) -> None:
"""
Generates a pipeline of variable length. Pipeline will have a tree structure similar to TPOT1.
"""
self.search_space = root_search_space
self.leaf_search_space = leaf_search_space
self.inner_search_space = inner_search_space
self.min_size = min_size
self.max_size = max_size
self.crossover_same_depth = crossover_same_depth
def generate(self, rng=None):
rng = np.random.default_rng(rng)
return TreePipelineIndividual(self.search_space, self.leaf_search_space, self.inner_search_space, self.min_size, self.max_size, self.crossover_same_depth, rng=rng) | TreePipeline |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 73874,
"end": 74227
} | class ____(FieldValues):
"""
Values for an valid `ImageField`.
"""
valid_inputs = [
(MockFile(name='example.png', size=10), MockFile(name='example.png', size=10))
]
invalid_inputs = {}
outputs = {}
field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
| TestValidImageField |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_bytes.py | {
"start": 235,
"end": 1898
} | class ____(BaseTestZDType):
test_cls = NullTerminatedBytes
valid_dtype = (np.dtype("|S10"), np.dtype("|S4"))
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype("|U10"),
)
valid_json_v2 = (
{"name": "|S1", "object_codec_id": None},
{"name": "|S2", "object_codec_id": None},
{"name": "|S4", "object_codec_id": None},
)
valid_json_v3 = ({"name": "null_terminated_bytes", "configuration": {"length_bytes": 10}},)
invalid_json_v2 = (
"|S",
"|U10",
"|f8",
{"name": "|S4", "object_codec_id": "vlen-bytes"},
)
invalid_json_v3 = (
{"name": "fixed_length_ascii", "configuration": {"length_bits": 0}},
{"name": "numpy.fixed_length_ascii", "configuration": {"length_bits": "invalid"}},
)
scalar_v2_params = (
(NullTerminatedBytes(length=1), "MA=="),
(NullTerminatedBytes(length=2), "YWI="),
(NullTerminatedBytes(length=4), "YWJjZA=="),
)
scalar_v3_params = (
(NullTerminatedBytes(length=1), "MA=="),
(NullTerminatedBytes(length=2), "YWI="),
(NullTerminatedBytes(length=4), "YWJjZA=="),
)
cast_value_params = (
(NullTerminatedBytes(length=1), "", np.bytes_("")),
(NullTerminatedBytes(length=2), "ab", np.bytes_("ab")),
(NullTerminatedBytes(length=4), "abcdefg", np.bytes_("abcd")),
)
invalid_scalar_params = ((NullTerminatedBytes(length=1), 1.0),)
item_size_params = (
NullTerminatedBytes(length=1),
NullTerminatedBytes(length=4),
NullTerminatedBytes(length=10),
)
| TestNullTerminatedBytes |
python | has2k1__plotnine | plotnine/scales/scale_identity.py | {
"start": 1897,
"end": 2124
} | class ____(
MapTrainMixin, scale_continuous[Literal["legend"] | None]
):
"""
No size scaling
"""
_aesthetics = ["size"]
_: KW_ONLY
guide: Literal["legend"] | None = None
@dataclass
| scale_size_identity |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py | {
"start": 9497,
"end": 18288
} | class ____(SageMakerBaseOperator):
"""
Use Amazon SageMaker Processing to analyze data and evaluate machine learning models on Amazon SageMaker.
With Processing, you can use a simplified, managed experience on SageMaker
to run your data processing workloads, such as feature engineering, data
validation, model evaluation, and model interpretation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerProcessingOperator`
:param config: The configuration necessary to start a processing job (templated).
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_processing_job`
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param wait_for_completion: If wait is set to True, the time interval, in seconds,
that the operation waits to check the status of the processing job.
:param print_log: if the operator should print the cloudwatch log during processing
:param check_interval: if wait is set to be true, this is the time interval
in seconds which the operator will check the status of the processing job
:param max_attempts: Number of times to poll for query state before returning the current state,
defaults to None.
:param max_ingestion_time: If wait is set to True, the operation fails if the processing job
doesn't finish within max_ingestion_time seconds. If you set this parameter to None,
the operation does not timeout.
:param action_if_job_exists: Behaviour if the job name already exists. Possible options are "timestamp"
(default) and "fail".
:param deferrable: Run operator in the deferrable mode. This is only effective if wait_for_completion is
set to True.
:return Dict: Returns The ARN of the processing job created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_attempts: int | None = None,
max_ingestion_time: int | None = None,
action_if_job_exists: str = "timestamp",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, **kwargs)
if action_if_job_exists not in ("fail", "timestamp"):
raise AirflowException(
f"Argument action_if_job_exists accepts only 'timestamp' and 'fail'. \
Provided value: '{action_if_job_exists}'."
)
self.action_if_job_exists = action_if_job_exists
self.wait_for_completion = wait_for_completion
self.print_log = print_log
self.check_interval = check_interval
self.max_attempts = max_attempts or 60
self.max_ingestion_time = max_ingestion_time
self.deferrable = deferrable
self.serialized_job: dict
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str] | list[list[str]]] = [
["ProcessingResources", "ClusterConfig", "InstanceCount"],
["ProcessingResources", "ClusterConfig", "VolumeSizeInGB"],
]
if "StoppingCondition" in self.config:
self.integer_fields.append(["StoppingCondition", "MaxRuntimeInSeconds"])
def expand_role(self) -> None:
"""Expand an IAM role name into an ARN."""
if "RoleArn" in self.config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
self.config["RoleArn"] = hook.expand_role(self.config["RoleArn"])
def execute(self, context: Context) -> dict:
self.preprocess_config()
self.config["ProcessingJobName"] = self._get_unique_job_name(
self.config["ProcessingJobName"],
self.action_if_job_exists == "fail",
self.hook.describe_processing_job,
)
if self.deferrable and not self.wait_for_completion:
self.log.warning(
"Setting deferrable to True does not have effect when wait_for_completion is set to False."
)
wait_for_completion = self.wait_for_completion
if self.deferrable and self.wait_for_completion:
# Set wait_for_completion to False so that it waits for the status in the deferred task.
wait_for_completion = False
response = self.hook.create_processing_job(
self.config,
wait_for_completion=wait_for_completion,
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker Processing Job creation failed: {response}")
if self.deferrable and self.wait_for_completion:
response = self.hook.describe_processing_job(self.config["ProcessingJobName"])
status = response["ProcessingJobStatus"]
if status in self.hook.failed_states:
raise AirflowException(f"SageMaker job failed because {response['FailureReason']}")
if status == "Completed":
self.log.info("%s completed successfully.", self.task_id)
return {"Processing": serialize(response)}
timeout = self.execution_timeout
if self.max_ingestion_time:
timeout = datetime.timedelta(seconds=self.max_ingestion_time)
self.defer(
timeout=timeout,
trigger=SageMakerTrigger(
job_name=self.config["ProcessingJobName"],
job_type="Processing",
poke_interval=self.check_interval,
max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
self.serialized_job = serialize(self.hook.describe_processing_job(self.config["ProcessingJobName"]))
return {"Processing": self.serialized_job}
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, dict]:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error while running job: {validated_event}")
self.log.info(validated_event["message"])
self.serialized_job = serialize(self.hook.describe_processing_job(validated_event["job_name"]))
self.log.info("%s completed successfully.", self.task_id)
return {"Processing": self.serialized_job}
def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage:
"""Return OpenLineage data gathered from SageMaker's API response saved by processing job."""
from airflow.providers.openlineage.extractors.base import OperatorLineage
inputs = []
outputs = []
try:
inputs, outputs = self._extract_s3_dataset_identifiers(
processing_inputs=self.serialized_job["ProcessingInputs"],
processing_outputs=self.serialized_job["ProcessingOutputConfig"]["Outputs"],
)
except KeyError:
self.log.exception("Could not find input/output information in Xcom.")
return OperatorLineage(inputs=inputs, outputs=outputs)
def _extract_s3_dataset_identifiers(self, processing_inputs, processing_outputs):
inputs = []
outputs = []
try:
for processing_input in processing_inputs:
inputs.append(self.path_to_s3_dataset(processing_input["S3Input"]["S3Uri"]))
except KeyError:
self.log.exception("Cannot find S3 input details")
try:
for processing_output in processing_outputs:
outputs.append(self.path_to_s3_dataset(processing_output["S3Output"]["S3Uri"]))
except KeyError:
self.log.exception("Cannot find S3 output details.")
return inputs, outputs
| SageMakerProcessingOperator |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qtagg.py | {
"start": 333,
"end": 3343
} | class ____(FigureCanvasAgg, FigureCanvasQT):
def paintEvent(self, event):
"""
Copy the image from the Agg canvas to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
self._draw_idle() # Only does something if a draw is pending.
# If the canvas does not have a renderer, then give up and wait for
# FigureCanvasAgg.draw(self) to be called.
if not hasattr(self, 'renderer'):
return
painter = QtGui.QPainter(self)
try:
# See documentation of QRect: bottom() and right() are off
# by 1, so use left() + width() and top() + height().
rect = event.rect()
# scale rect dimensions using the screen dpi ratio to get
# correct values for the Figure coordinates (rather than
# QT5's coords)
width = rect.width() * self.device_pixel_ratio
height = rect.height() * self.device_pixel_ratio
left, top = self.mouseEventCoords(rect.topLeft())
# shift the "top" by the height of the image to get the
# correct corner for our coordinate system
bottom = top - height
# same with the right side of the image
right = left + width
# create a buffer using the image bounding box
bbox = Bbox([[left, bottom], [right, top]])
buf = memoryview(self.copy_from_bbox(bbox))
if QT_API == "PyQt6":
from PyQt6 import sip
ptr = int(sip.voidptr(buf))
else:
ptr = buf
painter.eraseRect(rect) # clear the widget canvas
qimage = QtGui.QImage(ptr, buf.shape[1], buf.shape[0],
QtGui.QImage.Format.Format_RGBA8888)
qimage.setDevicePixelRatio(self.device_pixel_ratio)
# set origin using original QT coordinates
origin = QtCore.QPoint(rect.left(), rect.top())
painter.drawImage(origin, qimage)
# Adjust the buf reference count to work around a memory
# leak bug in QImage under PySide.
if QT_API == "PySide2" and QtCore.__version_info__ < (5, 12):
ctypes.c_long.from_address(id(buf)).value = 1
self._draw_rect_callback(painter)
finally:
painter.end()
def print_figure(self, *args, **kwargs):
super().print_figure(*args, **kwargs)
# In some cases, Qt will itself trigger a paint event after closing the file
# save dialog. When that happens, we need to be sure that the internal canvas is
# re-drawn. However, if the user is using an automatically-chosen Qt backend but
# saving with a different backend (such as pgf), we do not want to trigger a
# full draw in Qt, so just set the flag for next time.
self._draw_pending = True
@_BackendQT.export
| FigureCanvasQTAgg |
python | davidhalter__jedi | test/completion/django.py | {
"start": 452,
"end": 523
} | class ____(models.Model):
category_name = models.CharField()
| Category |
python | PyCQA__flake8 | src/flake8/plugins/finder.py | {
"start": 2351,
"end": 11129
} | class ____(NamedTuple):
"""Options related to plugin loading."""
local_plugin_paths: tuple[str, ...]
enable_extensions: frozenset[str]
require_plugins: frozenset[str]
@classmethod
def blank(cls) -> PluginOptions:
"""Make a blank PluginOptions, mostly used for tests."""
return cls(
local_plugin_paths=(),
enable_extensions=frozenset(),
require_plugins=frozenset(),
)
def _parse_option(
cfg: configparser.RawConfigParser,
cfg_opt_name: str,
opt: str | None,
) -> list[str]:
# specified on commandline: use that
if opt is not None:
return utils.parse_comma_separated_list(opt)
else:
# ideally this would reuse our config parsing framework but we need to
# parse this from preliminary options before plugins are enabled
for opt_name in (cfg_opt_name, cfg_opt_name.replace("_", "-")):
val = cfg.get("flake8", opt_name, fallback=None)
if val is not None:
return utils.parse_comma_separated_list(val)
else:
return []
def parse_plugin_options(
cfg: configparser.RawConfigParser,
cfg_dir: str,
*,
enable_extensions: str | None,
require_plugins: str | None,
) -> PluginOptions:
"""Parse plugin loading related options."""
paths_s = cfg.get("flake8:local-plugins", "paths", fallback="").strip()
paths = utils.parse_comma_separated_list(paths_s)
paths = utils.normalize_paths(paths, cfg_dir)
return PluginOptions(
local_plugin_paths=tuple(paths),
enable_extensions=frozenset(
_parse_option(cfg, "enable_extensions", enable_extensions),
),
require_plugins=frozenset(
_parse_option(cfg, "require_plugins", require_plugins),
),
)
def _flake8_plugins(
eps: Iterable[importlib.metadata.EntryPoint],
name: str,
version: str,
) -> Generator[Plugin]:
pyflakes_meta = importlib.metadata.distribution("pyflakes").metadata
pycodestyle_meta = importlib.metadata.distribution("pycodestyle").metadata
for ep in eps:
if ep.group not in FLAKE8_GROUPS:
continue
if ep.name == "F":
yield Plugin(pyflakes_meta["name"], pyflakes_meta["version"], ep)
elif ep.name in "EW":
# pycodestyle provides both `E` and `W` -- but our default select
# handles those
# ideally pycodestyle's plugin entrypoints would exactly represent
# the codes they produce...
yield Plugin(
pycodestyle_meta["name"], pycodestyle_meta["version"], ep,
)
else:
yield Plugin(name, version, ep)
def _find_importlib_plugins() -> Generator[Plugin]:
# some misconfigured pythons (RHEL) have things on `sys.path` twice
seen = set()
for dist in importlib.metadata.distributions():
# assigned to prevent continual reparsing
eps = dist.entry_points
# perf: skip parsing `.metadata` (slow) if no entry points match
if not any(ep.group in FLAKE8_GROUPS for ep in eps):
continue
# assigned to prevent continual reparsing
meta = dist.metadata
if meta["name"] in seen:
continue
else:
seen.add(meta["name"])
if meta["name"] in BANNED_PLUGINS:
LOG.warning(
"%s plugin is obsolete in flake8>=%s",
meta["name"],
BANNED_PLUGINS[meta["name"]],
)
continue
elif meta["name"] == "flake8":
# special case flake8 which provides plugins for pyflakes /
# pycodestyle
yield from _flake8_plugins(eps, meta["name"], meta["version"])
continue
for ep in eps:
if ep.group in FLAKE8_GROUPS:
yield Plugin(meta["name"], meta["version"], ep)
def _find_local_plugins(
cfg: configparser.RawConfigParser,
) -> Generator[Plugin]:
for plugin_type in ("extension", "report"):
group = f"flake8.{plugin_type}"
for plugin_s in utils.parse_comma_separated_list(
cfg.get("flake8:local-plugins", plugin_type, fallback="").strip(),
regexp=utils.LOCAL_PLUGIN_LIST_RE,
):
name, _, entry_str = plugin_s.partition("=")
name, entry_str = name.strip(), entry_str.strip()
ep = importlib.metadata.EntryPoint(name, entry_str, group)
yield Plugin("local", "local", ep)
def _check_required_plugins(
plugins: list[Plugin],
expected: frozenset[str],
) -> None:
plugin_names = {
utils.normalize_pypi_name(plugin.package) for plugin in plugins
}
expected_names = {utils.normalize_pypi_name(name) for name in expected}
missing_plugins = expected_names - plugin_names
if missing_plugins:
raise ExecutionError(
f"required plugins were not installed!\n"
f"- installed: {', '.join(sorted(plugin_names))}\n"
f"- expected: {', '.join(sorted(expected_names))}\n"
f"- missing: {', '.join(sorted(missing_plugins))}",
)
def find_plugins(
cfg: configparser.RawConfigParser,
opts: PluginOptions,
) -> list[Plugin]:
"""Discovers all plugins (but does not load them)."""
ret = [*_find_importlib_plugins(), *_find_local_plugins(cfg)]
# for determinism, sort the list
ret.sort()
_check_required_plugins(ret, opts.require_plugins)
return ret
def _parameters_for(func: Any) -> dict[str, bool]:
"""Return the parameters for the plugin.
This will inspect the plugin and return either the function parameters
if the plugin is a function or the parameters for ``__init__`` after
``self`` if the plugin is a class.
:returns:
A dictionary mapping the parameter name to whether or not it is
required (a.k.a., is positional only/does not have a default).
"""
is_class = not inspect.isfunction(func)
if is_class:
func = func.__init__
parameters = {
parameter.name: parameter.default is inspect.Parameter.empty
for parameter in inspect.signature(func).parameters.values()
if parameter.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
}
if is_class:
parameters.pop("self", None)
return parameters
def _load_plugin(plugin: Plugin) -> LoadedPlugin:
try:
obj = plugin.entry_point.load()
except Exception as e:
raise FailedToLoadPlugin(plugin.package, e)
if not callable(obj):
err = TypeError("expected loaded plugin to be callable")
raise FailedToLoadPlugin(plugin.package, err)
return LoadedPlugin(plugin, obj, _parameters_for(obj))
def _import_plugins(
plugins: list[Plugin],
opts: PluginOptions,
) -> list[LoadedPlugin]:
sys.path.extend(opts.local_plugin_paths)
return [_load_plugin(p) for p in plugins]
def _classify_plugins(
plugins: list[LoadedPlugin],
opts: PluginOptions,
) -> Plugins:
tree = []
logical_line = []
physical_line = []
reporters = {}
disabled = []
for loaded in plugins:
if (
getattr(loaded.obj, "off_by_default", False)
and loaded.plugin.entry_point.name not in opts.enable_extensions
):
disabled.append(loaded)
elif loaded.plugin.entry_point.group == "flake8.report":
reporters[loaded.entry_name] = loaded
elif "tree" in loaded.parameters:
tree.append(loaded)
elif "logical_line" in loaded.parameters:
logical_line.append(loaded)
elif "physical_line" in loaded.parameters:
physical_line.append(loaded)
else:
raise NotImplementedError(f"what plugin type? {loaded}")
for loaded in itertools.chain(tree, logical_line, physical_line):
if not VALID_CODE_PREFIX.match(loaded.entry_name):
raise ExecutionError(
f"plugin code for `{loaded.display_name}` does not match "
f"{VALID_CODE_PREFIX.pattern}",
)
return Plugins(
checkers=Checkers(
tree=tree,
logical_line=logical_line,
physical_line=physical_line,
),
reporters=reporters,
disabled=disabled,
)
def load_plugins(
plugins: list[Plugin],
opts: PluginOptions,
) -> Plugins:
"""Load and classify all flake8 plugins.
- first: extends ``sys.path`` with ``paths`` (to import local plugins)
- next: converts the ``Plugin``s to ``LoadedPlugins``
- finally: classifies plugins into their specific types
"""
return _classify_plugins(_import_plugins(plugins, opts), opts)
| PluginOptions |
python | jazzband__django-oauth-toolkit | oauth2_provider/forms.py | {
"start": 27,
"end": 734
} | class ____(forms.Form):
allow = forms.BooleanField(required=False)
redirect_uri = forms.CharField(widget=forms.HiddenInput())
scope = forms.CharField(widget=forms.HiddenInput())
nonce = forms.CharField(required=False, widget=forms.HiddenInput())
client_id = forms.CharField(widget=forms.HiddenInput())
state = forms.CharField(required=False, widget=forms.HiddenInput())
response_type = forms.CharField(widget=forms.HiddenInput())
code_challenge = forms.CharField(required=False, widget=forms.HiddenInput())
code_challenge_method = forms.CharField(required=False, widget=forms.HiddenInput())
claims = forms.CharField(required=False, widget=forms.HiddenInput())
| AllowForm |
python | django__django | tests/introspection/models.py | {
"start": 831,
"end": 1458
} | class ____(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
body = models.TextField(default="")
reporter = models.ForeignKey(Reporter, models.CASCADE)
response_to = models.ForeignKey("self", models.SET_NULL, null=True)
unmanaged_reporters = models.ManyToManyField(
Reporter, through="ArticleReporter", related_name="+"
)
class Meta:
ordering = ("headline",)
indexes = [
models.Index(fields=["headline", "pub_date"]),
models.Index(fields=["headline", "response_to", "pub_date", "reporter"]),
]
| Article |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 7823,
"end": 8732
} | class ____(HasOptions, Generic[R, T0, T1, T2, T3, T4, T5, T6]):
def __init__(self, function: Callable[[T0, T1, T2, T3, T4, T5, T6], R]) -> None:
pass
def remote(
self,
__arg0: "Union[T0, ObjectRef[T0]]",
__arg1: "Union[T1, ObjectRef[T1]]",
__arg2: "Union[T2, ObjectRef[T2]]",
__arg3: "Union[T3, ObjectRef[T3]]",
__arg4: "Union[T4, ObjectRef[T4]]",
__arg5: "Union[T5, ObjectRef[T5]]",
__arg6: "Union[T6, ObjectRef[T6]]",
) -> "ObjectRef[R]":
...
def bind(
self,
__arg0: "Union[T0, DAGNode[T0]]",
__arg1: "Union[T1, DAGNode[T1]]",
__arg2: "Union[T2, DAGNode[T2]]",
__arg3: "Union[T3, DAGNode[T3]]",
__arg4: "Union[T4, DAGNode[T4]]",
__arg5: "Union[T5, DAGNode[T5]]",
__arg6: "Union[T6, DAGNode[T6]]",
) -> "DAGNode[R]":
...
| RemoteFunction6 |
python | allegroai__clearml | clearml/backend_api/services/v2_9/workers.py | {
"start": 60187,
"end": 66576
} | class ____(Request):
"""
Returns statistics for the selected workers and time range aggregated by date intervals.
:param worker_ids: List of worker ids to collect metrics for. If not provided
or empty then all the company workers metrics are analyzed.
:type worker_ids: Sequence[str]
:param from_date: Starting time (in seconds from epoch) for collecting
statistics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting statistics
:type to_date: float
:param interval: Time interval in seconds for a single statistics point. The
minimal value is 1
:type interval: int
:param items: List of metric keys and requested statistics
:type items: Sequence[StatItem]
:param split_by_variant: If true then break statistics by hardware sub types
:type split_by_variant: bool
"""
_service = "workers"
_action = "get_stats"
_version = "2.9"
_schema = {
"definitions": {
"aggregation_type": {
"description": "Metric aggregation type",
"enum": ["avg", "min", "max"],
"type": "string",
},
"stat_item": {
"properties": {
"category": {
"oneOf": [
{"$ref": "#/definitions/aggregation_type"},
{"type": "null"},
]
},
"key": {
"description": "Name of a metric",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single statistics point. The minimal value is 1",
"type": "integer",
},
"items": {
"description": "List of metric keys and requested statistics",
"items": {"$ref": "#/definitions/stat_item"},
"type": "array",
},
"split_by_variant": {
"default": False,
"description": "If true then break statistics by hardware sub types",
"type": "boolean",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"worker_ids": {
"description": "List of worker ids to collect metrics for. If not provided or empty then all the company workers metrics are analyzed.",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"required": ["from_date", "to_date", "interval", "items"],
"type": "object",
}
def __init__(
self,
from_date: float,
to_date: float,
interval: int,
items: List[Any],
worker_ids: Optional[List[str]] = None,
split_by_variant: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetStatsRequest, self).__init__(**kwargs)
self.worker_ids = worker_ids
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.items = items
self.split_by_variant = split_by_variant
@schema_property("worker_ids")
def worker_ids(self) -> Optional[List[str]]:
return self._property_worker_ids
@worker_ids.setter
def worker_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_worker_ids = None
return
self.assert_isinstance(value, "worker_ids", (list, tuple))
self.assert_isinstance(value, "worker_ids", six.string_types, is_array=True)
self._property_worker_ids = value
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property("items")
def items(self) -> List[Any]:
return self._property_items
@items.setter
def items(self, value: List[Any]) -> None:
if value is None:
self._property_items = None
return
self.assert_isinstance(value, "items", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [StatItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "items", StatItem, is_array=True)
self._property_items = value
@schema_property("split_by_variant")
def split_by_variant(self) -> Optional[bool]:
return self._property_split_by_variant
@split_by_variant.setter
def split_by_variant(self, value: Optional[bool]) -> None:
if value is None:
self._property_split_by_variant = None
return
self.assert_isinstance(value, "split_by_variant", (bool,))
self._property_split_by_variant = value
| GetStatsRequest |
python | h5py__h5py | h5py/tests/test_dataset_getitem.py | {
"start": 15759,
"end": 16309
} | class ____(TestCase):
def setUp(self):
TestCase.setUp(self)
self.data = np.ones((0,3), dtype='f')
self.dset = self.f.create_dataset('x', data=self.data)
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 2)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, (0, 3))
def test_indexlist(self):
""" see issue #473 """
self.assertNumpyBehavior(self.dset, self.data, np.s_[:,[0,1,2]])
| Test2DZeroFloat |
python | pypa__pip | docs/pip_sphinxext.py | {
"start": 6138,
"end": 6297
} | class ____(PipOptions):
def process_options(self) -> None:
self._format_options([o() for o in cmdoptions.general_group["options"]])
| PipGeneralOptions |
python | python-openxml__python-docx | src/docx/image/tiff.py | {
"start": 6706,
"end": 8196
} | class ____:
"""Base class for IFD entry classes.
Subclasses are differentiated by value type, e.g. ASCII, long int, etc.
"""
def __init__(self, tag_code, value):
super(_IfdEntry, self).__init__()
self._tag_code = tag_code
self._value = value
@classmethod
def from_stream(cls, stream_rdr, offset):
"""Return an |_IfdEntry| subclass instance containing the tag and value of the
tag parsed from `stream_rdr` at `offset`.
Note this method is common to all subclasses. Override the ``_parse_value()``
method to provide distinctive behavior based on field type.
"""
tag_code = stream_rdr.read_short(offset, 0)
value_count = stream_rdr.read_long(offset, 4)
value_offset = stream_rdr.read_long(offset, 8)
value = cls._parse_value(stream_rdr, offset, value_count, value_offset)
return cls(tag_code, value)
@classmethod
def _parse_value(cls, stream_rdr, offset, value_count, value_offset):
"""Return the value of this field parsed from `stream_rdr` at `offset`.
Intended to be overridden by subclasses.
"""
return "UNIMPLEMENTED FIELD TYPE" # pragma: no cover
@property
def tag(self):
"""Short int code that identifies this IFD entry."""
return self._tag_code
@property
def value(self):
"""Value of this tag, its type being dependent on the tag."""
return self._value
| _IfdEntry |
python | getlogbook__logbook | src/logbook/handlers.py | {
"start": 40831,
"end": 52666
} | class ____(Handler, StringFormatterHandlerMixin, LimitingHandlerMixin):
"""A handler that sends error mails. The format string used by this
handler are the contents of the mail plus the headers. This is handy
if you want to use a custom subject or ``X-`` header:
.. blacken-docs:off
.. code-block:: python
handler = MailHandler(format_string='''\
Subject: {record.level_name} on My Application
{record.message}
{record.extra[a_custom_injected_record]}
''')
.. blacken-docs:on
This handler will always emit text-only mails for maximum portability and
best performance.
In the default setting it delivers all log records but it can be set up
to not send more than n mails for the same record each hour to not
overload an inbox and the network in case a message is triggered multiple
times a minute. The following example limits it to 60 mails an hour::
from datetime import timedelta
handler = MailHandler(record_limit=1, record_delta=timedelta(minutes=1))
The default timedelta is 60 seconds (one minute).
The mail handler sends mails in a blocking manner. If you are not
using some centralized system for logging these messages (with the help
of ZeroMQ or others) and the logging system slows you down you can
wrap the handler in a :class:`logbook.queues.ThreadedWrapperHandler`
that will then send the mails in a background thread.
`server_addr` can be a tuple of host and port, or just a string containing
the host to use the default port (25, or 465 if connecting securely.)
`credentials` can be a tuple or dictionary of arguments that will be passed
to :py:meth:`smtplib.SMTP.login`.
`secure` should be an :class:`ssl.SSLContext` or boolean. Please read
:ref:`ssl-security` for best practices. For backwards
compatibility reasons, `secure` may also be a tuple or dictionary, although
this is deprecated:
* ``(keyfile, certfile)`` tuple
* ``{'keyfile': keyfile, 'certfile': certfile}`` dict
* ``()`` an empty tuple is equivalent to ``True``.
.. versionchanged:: 0.3
The handler supports the batching system now.
.. versionadded:: 1.0
`starttls` parameter added to allow disabling STARTTLS for SSL
connections.
.. versionchanged:: 1.0
If `server_addr` is a string, the default port will be used.
.. versionchanged:: 1.0
`credentials` parameter can now be a dictionary of keyword arguments.
.. versionchanged:: 1.0
`secure` can now be a dictionary or boolean in addition to a tuple.
.. versionchanged:: 1.7
`secure` may be an :class:`ssl.SSLContext` (recommended). The tuple or
dict form is deprecated.
"""
default_format_string = MAIL_FORMAT_STRING
default_related_format_string = MAIL_RELATED_FORMAT_STRING
default_subject = "Server Error in Application"
#: the maximum number of record hashes in the cache for the limiting
#: feature. Afterwards, record_cache_prune percent of the oldest
#: entries are removed
max_record_cache = 512
#: the number of items to prune on a cache overflow in percent.
record_cache_prune = 0.333
def __init__(
self,
from_addr,
recipients,
subject=None,
server_addr=None,
credentials=None,
secure=None,
record_limit=None,
record_delta=None,
level=NOTSET,
format_string=None,
related_format_string=None,
filter=None,
bubble=False,
starttls=True,
):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
LimitingHandlerMixin.__init__(self, record_limit, record_delta)
self.from_addr = from_addr
self.recipients = recipients
if subject is None:
subject = self.default_subject
self.subject = subject
self.server_addr = server_addr
self.credentials = credentials
self.secure = self._adapt_secure(secure)
if related_format_string is None:
related_format_string = self.default_related_format_string
self.related_format_string = related_format_string
self.starttls = starttls
def _adapt_secure(self, secure):
if secure is None or isinstance(secure, (bool, ssl.SSLContext)):
return secure
if isinstance(secure, tuple):
if not secure:
# For backwards compatibility, () translates to True
return True
else:
keyfile, certfile = secure
elif isinstance(secure, Mapping):
keyfile = secure.get("keyfile", None)
certfile = secure.get("certfile", None)
else:
raise TypeError(f"Unexpected type for `secure`: {type(secure)}")
warnings.warn(
"Passing keyfile and certfile are deprecated, use an SSLContext instead.",
DeprecationWarning,
stacklevel=3,
)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.load_cert_chain(certfile, keyfile)
return ctx
@property
def related_format_string(self):
if isinstance(self.related_formatter, StringFormatter):
return self.related_formatter.format_string
@related_format_string.setter
def related_format_string(self, value):
if value is None:
self.related_formatter = None
else:
self.related_formatter = self.formatter_class(value)
def get_recipients(self, record):
"""Returns the recipients for a record. By default the
:attr:`recipients` attribute is returned for all records.
"""
return self.recipients
def message_from_record(self, record, suppressed):
"""Creates a new message for a record as email message object
(:class:`email.message.Message`). `suppressed` is the number
of mails not sent if the `record_limit` feature is active.
"""
from email.header import Header
from email.message import Message
msg = Message()
msg.set_charset("utf-8")
lineiter = iter(self.format(record).splitlines())
for line in lineiter:
if not line:
break
h, v = line.split(":", 1)
# We could probably just encode everything. For the moment encode
# only what really needed to avoid breaking a couple of tests.
try:
v.encode("ascii")
except UnicodeEncodeError:
msg[h.strip()] = Header(v.strip(), "utf-8")
else:
msg[h.strip()] = v.strip()
msg.replace_header("Content-Transfer-Encoding", "8bit")
body = "\r\n".join(lineiter)
if suppressed:
body += (
f"\r\n\r\nThis message occurred additional {suppressed} "
"time(s) and was suppressed"
)
msg.set_payload(body, "UTF-8")
return msg
def format_related_record(self, record):
"""Used for format the records that led up to another record or
records that are related into strings. Used by the batch formatter.
"""
return self.related_formatter(record, self)
def generate_mail(self, record, suppressed=0):
"""Generates the final email (:class:`email.message.Message`)
with headers and date. `suppressed` is the number of mails
that were not send if the `record_limit` feature is active.
"""
from email.utils import formatdate
msg = self.message_from_record(record, suppressed)
msg["From"] = self.from_addr
msg["Date"] = formatdate()
return msg
def collapse_mails(self, mail, related, reason):
"""When escaling or grouped mails are"""
if not related:
return mail
if reason == "group":
title = "Other log records in the same group"
else:
title = "Log records that led up to this one"
mail.set_payload(
"{}\r\n\r\n\r\n{}:\r\n\r\n{}".format(
mail.get_payload(),
title,
"\r\n\r\n".join(body.rstrip() for body in related),
),
"UTF-8",
)
return mail
def get_connection(self):
"""Returns an SMTP connection. By default it reconnects for
each sent mail.
"""
from smtplib import SMTP, SMTP_PORT, SMTP_SSL, SMTP_SSL_PORT
if self.secure:
if self.starttls:
default_port = 587
else:
default_port = SMTP_SSL_PORT
else:
default_port = SMTP_PORT
if self.server_addr is None:
host = "127.0.0.1"
port = default_port
else:
try:
host, port = self.server_addr
except ValueError:
# If server_addr is a string, the tuple unpacking will raise
# ValueError, and we can use the default port.
host = self.server_addr
port = default_port
if isinstance(self.secure, ssl.SSLContext):
context = self.secure
else:
context = None
if self.secure and not self.starttls:
con = SMTP_SSL(host, port, context=context)
else:
con = SMTP(host, port)
if self.secure and self.starttls:
con.starttls(context=context)
con.ehlo()
if self.credentials is not None:
# Allow credentials to be a tuple or dict.
if isinstance(self.credentials, Mapping):
credentials_args = ()
credentials_kwargs = self.credentials
else:
credentials_args = self.credentials
credentials_kwargs = dict()
con.login(*credentials_args, **credentials_kwargs)
return con
def close_connection(self, con):
    """Closes the connection that was returned by
    :meth:`get_connection`.  A ``None`` connection is a no-op.
    """
    if con is None:
        return
    try:
        con.quit()
    except Exception:
        # Best-effort teardown: a failing QUIT must not mask the
        # outcome of the delivery itself.
        pass
def deliver(self, msg, recipients):
    """Delivers *msg* to every address in *recipients* over a fresh
    SMTP connection, which is always closed afterwards.
    """
    connection = self.get_connection()
    try:
        connection.sendmail(self.from_addr, recipients, msg.as_string())
    finally:
        self.close_connection(connection)
def emit(self, record):
    """Sends a single mail for *record*, honoring the optional
    ``record_limit`` throttle.
    """
    suppressed = 0
    if self.record_limit is not None:
        # check_delivery reports how many mails were already withheld
        # and whether this one may still go out.
        suppressed, allowed = self.check_delivery(record)
        if not allowed:
            return
    mail = self.generate_mail(record, suppressed)
    self.deliver(mail, self.get_recipients(record))
def emit_batch(self, records, reason):
    """Sends one collapsed mail for a batch of *records*.

    *reason* must be ``'escalation'`` (the newest record triggered the
    batch) or ``'group'`` (the oldest did).  The remaining records are
    formatted and folded into the trigger's mail body.
    """
    if reason not in ("escalation", "group"):
        raise RuntimeError("reason must be either 'escalation' or 'group'")
    remaining = list(records)
    if not remaining:
        return
    # Escalation: the last record is the trigger; group: the first.
    trigger = remaining.pop(-1 if reason == "escalation" else 0)
    suppressed = 0
    if self.record_limit is not None:
        suppressed, allowed = self.check_delivery(trigger)
        if not allowed:
            return
    mail = self.generate_mail(trigger, suppressed)
    summaries = [self.format_related_record(rec) for rec in remaining]
    self.deliver(
        self.collapse_mails(mail, summaries, reason),
        self.get_recipients(trigger),
    )
| MailHandler |
python | mlflow__mlflow | mlflow/projects/_project_spec.py | {
"start": 12595,
"end": 14290
} | class ____:
"""A parameter in an MLproject entry point."""
def __init__(self, name, yaml_obj):
self.name = name
if is_string_type(yaml_obj):
self.type = yaml_obj
self.default = None
else:
self.type = yaml_obj.get("type", "string")
self.default = yaml_obj.get("default")
def _compute_uri_value(self, user_param_value):
if not data_utils.is_uri(user_param_value):
raise ExecutionException(
f"Expected URI for parameter {self.name} but got {user_param_value}"
)
return user_param_value
def _compute_path_value(self, user_param_value, storage_dir, key_position):
if local_path := get_local_path_or_none(user_param_value):
if not os.path.exists(local_path):
raise ExecutionException(
f"Got value {user_param_value} for parameter {self.name}, but no such file or "
"directory was found."
)
return os.path.abspath(local_path)
target_sub_dir = f"param_{key_position}"
download_dir = os.path.join(storage_dir, target_sub_dir)
os.mkdir(download_dir)
return artifact_utils._download_artifact_from_uri(
artifact_uri=user_param_value, output_path=download_dir
)
def compute_value(self, param_value, storage_dir, key_position):
if storage_dir and self.type == "path":
return self._compute_path_value(param_value, storage_dir, key_position)
elif self.type == "uri":
return self._compute_uri_value(param_value)
else:
return param_value
| Parameter |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 12835,
"end": 13006
} | class ____(Variable):
"""An abstract Argument data structure: a name and a data type.
This structure is refined in the descendants below.
"""
pass
| Argument |
python | mlflow__mlflow | tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_backend.py | {
"start": 600,
"end": 1109
} | class ____(AbstractBackend):
def run(
self,
project_uri,
entry_point,
params,
version,
backend_config,
tracking_uri,
experiment_id,
):
work_dir = fetch_and_validate_project(project_uri, version, entry_point, params)
active_run = get_or_create_run(
None, project_uri, experiment_id, work_dir, version, entry_point, params
)
return DummySubmittedRun(active_run.info.run_id)
| PluginDummyProjectBackend |
python | sphinx-doc__sphinx | tests/roots/test-ext-math-compat/conf.py | {
"start": 267,
"end": 502
} | class ____(Directive):
def run(self):
text = 'E = mc^2'
return [nodes.math_block(text, text)]
def setup(app):
app.add_role('my_math', my_math_role)
app.add_directive('my-math', MyMathDirective)
| MyMathDirective |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.