language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_organization_alertrule_workflow.py
|
{
"start": 145,
"end": 1383
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-alert-rule-workflow-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.workflow_1 = self.create_workflow(organization=self.organization)
self.workflow_2 = self.create_workflow(organization=self.organization)
self.workflow_3 = self.create_workflow(organization=self.organization)
self.alert_rule_workflow_1 = self.create_alert_rule_workflow(
alert_rule_id=12345, workflow=self.workflow_1
)
self.alert_rule_workflow_2 = self.create_alert_rule_workflow(
rule_id=67890, workflow=self.workflow_2
)
self.alert_rule_workflow_3 = self.create_alert_rule_workflow(
alert_rule_id=11111, workflow=self.workflow_3
)
# Create workflow in different organization to test filtering
self.other_org = self.create_organization()
self.other_workflow = self.create_workflow(organization=self.other_org)
self.other_alert_rule_workflow = self.create_alert_rule_workflow(
alert_rule_id=99999, workflow=self.other_workflow
)
@region_silo_test
|
OrganizationAlertRuleWorkflowAPITestCase
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/proto_splitter_save_test.py
|
{
"start": 1169,
"end": 2520
}
|
class ____(test.TestCase, parameterized.TestCase):
def test_save_experimental_image_format(self):
root = module.Module()
root.c = constant_op.constant(np.random.random_sample([150, 150]))
root.get_c = def_function.function(lambda: root.c)
save_dir = os.path.join(self.get_temp_dir(), "chunked_model")
constants.debug_set_max_size(80000)
options = save_options.SaveOptions(experimental_image_format=True)
save.save(
root,
save_dir,
signatures=root.get_c.get_concrete_function(),
options=options,
)
self.assertTrue(os.path.exists(save_dir + "/saved_model.cpb"))
def test_save_experimental_image_format_not_chunked(self):
root = module.Module()
root.c = constant_op.constant(np.random.random_sample([150, 150]))
root.get_c = def_function.function(lambda: root.c)
save_dir = os.path.join(self.get_temp_dir(), "not_chunked_model")
constants.debug_set_max_size(1 << 31) # 2GB
options = save_options.SaveOptions(experimental_image_format=True)
save.save(
root,
save_dir,
signatures=root.get_c.get_concrete_function(),
options=options,
)
# Should save an unchunked proto (.pb) and not .cpb
self.assertTrue(os.path.exists(save_dir + "/saved_model.pb"))
if __name__ == "__main__":
test.main()
|
ProtoSplitterSaveTest
|
python
|
getsentry__sentry
|
tests/sentry/integrations/gitlab/test_repository.py
|
{
"start": 682,
"end": 13631
}
|
class ____(IntegrationRepositoryTestCase):
provider_name = "integrations:gitlab"
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_provider_integration(
provider="gitlab",
name="Example GitLab",
external_id="example.gitlab.com:getsentry",
metadata={
"instance": "example.gitlab.com",
"domain_name": "example.gitlab.com/getsentry",
"verify_ssl": False,
"base_url": "https://example.gitlab.com",
"webhook_secret": "secret-token-value",
},
)
identity = Identity.objects.create(
idp=self.create_identity_provider(
type="gitlab", config={}, external_id="1234567890"
),
user=self.user,
external_id="example.gitlab.com:4",
data={"access_token": "1234567890"},
)
self.integration.add_organization(self.organization, self.user, identity.id)
self.integration.get_provider().setup()
self.default_repository_config = {
"path_with_namespace": "getsentry/example-repo",
"name_with_namespace": "Get Sentry / Example Repo",
"path": "example-repo",
"id": "123",
"web_url": "https://example.gitlab.com/getsentry/projects/example-repo",
}
self.gitlab_id = 123
@cached_property
def provider(self):
return GitlabRepositoryProvider("gitlab")
def tearDown(self) -> None:
super().tearDown()
responses.reset()
def add_create_repository_responses(self, repository_config):
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s" % self.gitlab_id,
json=repository_config,
)
responses.add(
responses.POST,
"https://example.gitlab.com/api/v4/projects/%s/hooks" % self.gitlab_id,
json={"id": 99},
)
@assume_test_silo_mode(SiloMode.REGION)
def get_repository(self, **kwargs) -> Repository:
return Repository.objects.get(**kwargs)
def assert_repository(self, repository_config, organization_id=None):
instance = self.integration.metadata["instance"]
external_id = "{}:{}".format(instance, repository_config["id"])
repo = self.get_repository(
organization_id=organization_id or self.organization.id,
provider=self.provider_name,
external_id=external_id,
)
assert repo.name == repository_config["name_with_namespace"]
assert repo.url == repository_config["web_url"]
assert repo.integration_id == self.integration.id
assert repo.config == {
"instance": instance,
"path": repository_config["path_with_namespace"],
"project_id": repository_config["id"],
"webhook_id": 99,
}
@responses.activate
def test_create_repository(self) -> None:
response = self.create_repository(self.default_repository_config, self.integration.id)
assert response.status_code == 201
self.assert_repository(self.default_repository_config)
@responses.activate
def test_create_repository_verify_payload(self) -> None:
def request_callback(request):
payload = orjson.loads(request.body)
assert "url" in payload
assert payload["push_events"]
assert payload["merge_requests_events"]
expected_token = "{}:{}".format(
self.integration.external_id, self.integration.metadata["webhook_secret"]
)
assert payload["token"] == expected_token
return 201, {}, orjson.dumps({"id": 99}).decode()
responses.add_callback(
responses.POST,
"https://example.gitlab.com/api/v4/projects/%s/hooks" % self.gitlab_id,
callback=request_callback,
)
response = self.create_repository(self.default_repository_config, self.integration.id)
assert response.status_code == 201
self.assert_repository(self.default_repository_config)
@responses.activate
def test_create_repository_request_invalid_url(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s" % self.gitlab_id,
status=200,
json=self.default_repository_config,
)
responses.add(
responses.POST,
"https://example.gitlab.com/api/v4/projects/%s/hooks" % self.gitlab_id,
status=422,
json={"error": "Invalid url given"},
)
response = self.create_repository(
self.default_repository_config, self.integration.id, add_responses=False
)
assert response.status_code == 400
self.assert_error_message(
response, "validation", "Error Communicating with GitLab (HTTP 422): Invalid url given"
)
def test_create_repository_data_no_installation_id(self) -> None:
response = self.create_repository(self.default_repository_config, None)
assert response.status_code == 400
self.assert_error_message(response, "validation", "requires an integration id")
def test_create_repository_data_integration_does_not_exist(self) -> None:
integration_id = self.integration.id
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
response = self.create_repository(self.default_repository_config, integration_id)
assert response.status_code == 404
self.assert_error_message(
response, "not found", "Integration matching query does not exist."
)
def test_create_repository_org_given_has_no_installation(self) -> None:
organization = self.create_organization(owner=self.user)
response = self.create_repository(
self.default_repository_config, self.integration.id, organization.slug
)
assert response.status_code == 404
@responses.activate
def test_create_repository_get_project_request_fails(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s" % self.gitlab_id,
status=503,
)
response = self.create_repository(
self.default_repository_config, self.integration.id, add_responses=False
)
assert response.status_code == 503
@responses.activate
def test_create_repository_integration_create_webhook_failure(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s" % self.gitlab_id,
json=self.default_repository_config,
)
responses.add(
responses.POST,
"https://example.gitlab.com/api/v4/projects/%s/hooks" % self.gitlab_id,
status=503,
)
response = self.create_repository(
self.default_repository_config, self.integration.id, add_responses=False
)
assert response.status_code == 503
@responses.activate
def test_on_delete_repository_remove_webhook(self) -> None:
response = self.create_repository(self.default_repository_config, self.integration.id)
responses.reset()
responses.add(
responses.DELETE,
"https://example.gitlab.com/api/v4/projects/%s/hooks/99" % self.gitlab_id,
status=204,
)
repo = self.get_repository(pk=response.data["id"])
self.provider.on_delete_repository(repo)
assert len(responses.calls) == 1
@responses.activate
def test_on_delete_repository_remove_webhook_missing_hook(self) -> None:
response = self.create_repository(self.default_repository_config, self.integration.id)
responses.reset()
responses.add(
responses.DELETE,
"https://example.gitlab.com/api/v4/projects/%s/hooks/99" % self.gitlab_id,
status=404,
)
repo = self.get_repository(pk=response.data["id"])
self.provider.on_delete_repository(repo)
assert len(responses.calls) == 1
@responses.activate
def test_compare_commits_start_and_end(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/compare?from=abc&to=xyz"
% self.gitlab_id,
json=orjson.loads(COMPARE_RESPONSE),
)
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/12d65c8dd2b2676fa3ac47d955accc085a37a9c1/diff"
% self.gitlab_id,
json=orjson.loads(COMMIT_DIFF_RESPONSE),
)
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/8b090c1b79a14f2bd9e8a738f717824ff53aebad/diff"
% self.gitlab_id,
json=orjson.loads(COMMIT_DIFF_RESPONSE),
)
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
commits = self.provider.compare_commits(repo, "abc", "xyz")
assert 2 == len(commits)
for commit in commits:
assert_commit_shape(commit)
@responses.activate
def test_compare_commits_start_and_end_gitlab_failure(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/compare?from=abc&to=xyz"
% self.gitlab_id,
status=502,
)
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
with pytest.raises(IntegrationError):
self.provider.compare_commits(repo, "abc", "xyz")
@responses.activate
def test_compare_commits_no_start(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/xyz" % self.gitlab_id,
json={"created_at": "2018-09-19T13:14:15Z"},
)
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits?until=2018-09-19T13:14:15Z"
% self.gitlab_id,
json=orjson.loads(COMMIT_LIST_RESPONSE),
)
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/ed899a2f4b50b4370feeea94676502b42383c746/diff"
% self.gitlab_id,
json=orjson.loads(COMMIT_DIFF_RESPONSE),
)
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/6104942438c14ec7bd21c6cd5bd995272b3faff6/diff"
% self.gitlab_id,
json=orjson.loads(COMMIT_DIFF_RESPONSE),
)
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
commits = self.provider.compare_commits(repo, None, "xyz")
for commit in commits:
assert_commit_shape(commit)
@responses.activate
def test_compare_commits_no_start_gitlab_failure(self) -> None:
responses.add(
responses.GET,
"https://example.gitlab.com/api/v4/projects/%s/repository/commits/abc" % self.gitlab_id,
status=502,
)
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
with pytest.raises(IntegrationError):
self.provider.compare_commits(repo, None, "abc")
@responses.activate
def test_pull_request_url(self) -> None:
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
pull = PullRequest(key=99)
result = self.provider.pull_request_url(repo, pull)
assert (
result == "https://example.gitlab.com/getsentry/projects/example-repo/merge_requests/99"
)
@responses.activate
def test_repository_external_slug(self) -> None:
response = self.create_repository(self.default_repository_config, self.integration.id)
repo = self.get_repository(pk=response.data["id"])
result = self.provider.repository_external_slug(repo)
assert result == repo.config["project_id"]
|
GitLabRepositoryProviderTest
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/message_bus.py
|
{
"start": 5774,
"end": 8485
}
|
class ____(BaseAzureServiceBusTrigger):
"""
Trigger for Azure Service Bus Topic Subscription message processing.
This trigger monitors topic subscriptions for incoming messages. It can handle
multiple topics with a single subscription name, processing messages as they
arrive and yielding them as trigger events.
Example:
>>> trigger = AzureServiceBusSubscriptionTrigger(
... topics=["topic1", "topic2"],
... subscription_name="my-subscription",
... azure_service_bus_conn_id="my_asb_conn",
... )
:param topics: List of topic names to monitor
:param subscription_name: Name of the subscription to use
:param poll_interval: Time interval between polling operations (seconds)
:param azure_service_bus_conn_id: Connection ID for Azure Service Bus
:param max_wait_time: Maximum time to wait for messages (seconds)
"""
def __init__(
self,
topics: list[str],
subscription_name: str,
poll_interval: float | None = None,
azure_service_bus_conn_id: str | None = None,
max_wait_time: float | None = None,
) -> None:
super().__init__(poll_interval, azure_service_bus_conn_id, max_wait_time)
self.topics = topics
self.subscription_name = subscription_name
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"azure_service_bus_conn_id": self.connection_id,
"topics": self.topics,
"subscription_name": self.subscription_name,
"poll_interval": self.poll_interval,
"max_wait_time": self.max_wait_time,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
read_subscription_message_async = sync_to_async(self.message_hook.read_subscription_message)
while True:
for topic_name in self.topics:
message = await read_subscription_message_async(
topic_name=topic_name,
subscription_name=self.subscription_name,
max_wait_time=self.max_wait_time,
)
if message:
yield TriggerEvent(
{
"message": BaseAzureServiceBusTrigger._get_message_body(message),
"topic": topic_name,
"subscription": self.subscription_name,
}
)
break
await asyncio.sleep(self.poll_interval)
|
AzureServiceBusSubscriptionTrigger
|
python
|
fluentpython__example-code
|
attic/dicts/test_transformdict.py
|
{
"start": 646,
"end": 7288
}
|
class ____(TransformDictTestBase):
def test_init(self):
with self.assertRaises(TypeError):
TransformDict()
with self.assertRaises(TypeError):
# Too many positional args
TransformDict(str.lower, {}, {})
with self.assertRaises(TypeError):
# Not a callable
TransformDict(object())
d = TransformDict(str.lower)
self.check_underlying_dict(d, {})
pairs = [('Bar', 1), ('Foo', 2)]
d = TransformDict(str.lower, pairs)
self.assertEqual(sorted(d.items()), pairs)
self.check_underlying_dict(d, {'bar': 1, 'foo': 2})
d = TransformDict(str.lower, dict(pairs))
self.assertEqual(sorted(d.items()), pairs)
self.check_underlying_dict(d, {'bar': 1, 'foo': 2})
d = TransformDict(str.lower, **dict(pairs))
self.assertEqual(sorted(d.items()), pairs)
self.check_underlying_dict(d, {'bar': 1, 'foo': 2})
d = TransformDict(str.lower, {'Bar': 1}, Foo=2)
self.assertEqual(sorted(d.items()), pairs)
self.check_underlying_dict(d, {'bar': 1, 'foo': 2})
def test_transform_func(self):
# Test the `transform_func` attribute
d = TransformDict(str.lower)
self.assertIs(d.transform_func, str.lower)
# The attribute is read-only
with self.assertRaises(AttributeError):
d.transform_func = str.upper
def test_various_transforms(self):
d = TransformDict(lambda s: s.encode('utf-8'))
d['Foo'] = 5
self.assertEqual(d['Foo'], 5)
self.check_underlying_dict(d, {b'Foo': 5})
with self.assertRaises(AttributeError):
# 'bytes' object has no attribute 'encode'
d[b'Foo']
# Another example
d = TransformDict(str.swapcase)
d['Foo'] = 5
self.assertEqual(d['Foo'], 5)
self.check_underlying_dict(d, {'fOO': 5})
with self.assertRaises(KeyError):
d['fOO']
# NOTE: we mostly test the operations which are not inherited from
# MutableMapping.
def test_setitem_getitem(self):
d = TransformDict(str.lower)
with self.assertRaises(KeyError):
d['foo']
d['Foo'] = 5
self.assertEqual(d['foo'], 5)
self.assertEqual(d['Foo'], 5)
self.assertEqual(d['FOo'], 5)
with self.assertRaises(KeyError):
d['bar']
self.check_underlying_dict(d, {'foo': 5})
d['BAR'] = 6
self.assertEqual(d['Bar'], 6)
self.check_underlying_dict(d, {'foo': 5, 'bar': 6})
# Overwriting
d['foO'] = 7
self.assertEqual(d['foo'], 7)
self.assertEqual(d['Foo'], 7)
self.assertEqual(d['FOo'], 7)
self.check_underlying_dict(d, {'foo': 7, 'bar': 6})
def test_delitem(self):
d = TransformDict(str.lower, Foo=5)
d['baR'] = 3
del d['fOO']
with self.assertRaises(KeyError):
del d['Foo']
with self.assertRaises(KeyError):
del d['foo']
self.check_underlying_dict(d, {'bar': 3})
def test_get(self):
d = TransformDict(str.lower)
default = object()
self.assertIs(d.get('foo'), None)
self.assertIs(d.get('foo', default), default)
d['Foo'] = 5
self.assertEqual(d.get('foo'), 5)
self.assertEqual(d.get('FOO'), 5)
self.assertIs(d.get('bar'), None)
self.check_underlying_dict(d, {'foo': 5})
def test_getitem(self):
d = TransformDict(str.lower)
d['Foo'] = 5
self.assertEqual(d.getitem('foo'), ('Foo', 5))
self.assertEqual(d.getitem('FOO'), ('Foo', 5))
with self.assertRaises(KeyError):
d.getitem('bar')
def test_pop(self):
d = TransformDict(str.lower)
default = object()
with self.assertRaises(KeyError):
d.pop('foo')
self.assertIs(d.pop('foo', default), default)
d['Foo'] = 5
self.assertIn('foo', d)
self.assertEqual(d.pop('foo'), 5)
self.assertNotIn('foo', d)
self.check_underlying_dict(d, {})
d['Foo'] = 5
self.assertIn('Foo', d)
self.assertEqual(d.pop('FOO'), 5)
self.assertNotIn('foo', d)
self.check_underlying_dict(d, {})
with self.assertRaises(KeyError):
d.pop('foo')
def test_clear(self):
d = TransformDict(str.lower)
d.clear()
self.check_underlying_dict(d, {})
d['Foo'] = 5
d['baR'] = 3
self.check_underlying_dict(d, {'foo': 5, 'bar': 3})
d.clear()
self.check_underlying_dict(d, {})
def test_contains(self):
d = TransformDict(str.lower)
self.assertIs(False, 'foo' in d)
d['Foo'] = 5
self.assertIs(True, 'Foo' in d)
self.assertIs(True, 'foo' in d)
self.assertIs(True, 'FOO' in d)
self.assertIs(False, 'bar' in d)
def test_len(self):
d = TransformDict(str.lower)
self.assertEqual(len(d), 0)
d['Foo'] = 5
self.assertEqual(len(d), 1)
d['BAR'] = 6
self.assertEqual(len(d), 2)
d['foo'] = 7
self.assertEqual(len(d), 2)
d['baR'] = 3
self.assertEqual(len(d), 2)
del d['Bar']
self.assertEqual(len(d), 1)
def test_iter(self):
d = TransformDict(str.lower)
it = iter(d)
with self.assertRaises(StopIteration):
next(it)
d['Foo'] = 5
d['BAR'] = 6
self.assertEqual(set(x for x in d), {'Foo', 'BAR'})
def test_first_key_retained(self):
d = TransformDict(str.lower, {'Foo': 5, 'BAR': 6})
self.assertEqual(set(d), {'Foo', 'BAR'})
d['foo'] = 7
d['baR'] = 8
d['quux'] = 9
self.assertEqual(set(d), {'Foo', 'BAR', 'quux'})
del d['foo']
d['FOO'] = 9
del d['bar']
d.setdefault('Bar', 15)
d.setdefault('BAR', 15)
self.assertEqual(set(d), {'FOO', 'Bar', 'quux'})
def test_repr(self):
d = TransformDict(str.lower)
self.assertEqual(repr(d),
"TransformDict(<method 'lower' of 'str' objects>, {})")
d['Foo'] = 5
self.assertEqual(repr(d),
"TransformDict(<method 'lower' of 'str' objects>, {'Foo': 5})")
def test_repr_non_hashable_keys(self):
d = TransformDict(id)
self.assertEqual(repr(d),
"TransformDict(<built-in function id>, {})")
d[[1]] = 2
self.assertEqual(repr(d),
"TransformDict(<built-in function id>, [([1], 2)])")
|
TestTransformDict
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 29159,
"end": 29578
}
|
class ____(models.Model):
born_country = models.ForeignKey(Country, models.CASCADE)
living_country = models.ForeignKey(
Country, models.CASCADE, related_name="living_country_set"
)
favorite_country_to_vacation = models.ForeignKey(
Country,
models.CASCADE,
related_name="favorite_country_to_vacation_set",
limit_choices_to={"continent": Country.ASIA},
)
|
Traveler
|
python
|
getsentry__sentry
|
src/sentry/replays/testutils.py
|
{
"start": 290,
"end": 20307
}
|
class ____(Enum):
DomContentLoaded = 0
Load = 1
FullSnapshot = 2
IncrementalSnapshot = 3
Meta = 4
Custom = 5
Plugin = 6
SegmentList = list[dict[str, Any]]
RRWebNode = dict[str, Any]
def sec(timestamp: datetime.datetime) -> int:
# sentry data inside rrweb is recorded in seconds
return int(timestamp.timestamp())
def ms(timestamp: datetime.datetime) -> int:
return int(timestamp.timestamp()) * 1000
def assert_viewed_by_expected_ids_and_unique(
viewed_by: list[dict[str, Any]], expected_ids: set[int]
):
seen = set()
for user_dict in viewed_by:
id = int(user_dict["id"])
assert id not in seen
seen.add(id)
assert seen == expected_ids
def assert_expected_response(response: dict[str, Any], expected_response: dict[str, Any]) -> None:
"""Assert a received response matches what was expected."""
# Compare the response structure and values to the expected response.
for key, value in expected_response.items():
assert key in response, f"key: {key}"
response_value = response.pop(key)
if isinstance(response_value, dict):
assert isinstance(value, dict)
for k, v in value.items():
if isinstance(v, list):
assert sorted(response_value[k]) == sorted(v)
else:
assert response_value[k] == v, f"value: {v}, expected: {response_value[k]}"
elif isinstance(response_value, list):
assert len(response_value) == len(value), f'"{response_value}" "{value}"'
for item in response_value:
assert item in value, f"{key}, {item}"
value.remove(item)
else:
assert response_value == value, f'"{key}, {response_value}" "{value}"'
# Ensure no lingering unexpected keys exist.
assert list(response.keys()) == [], response.keys()
def mock_expected_response(
project_id: int,
replay_id: str,
started_at: datetime.datetime,
finished_at: datetime.datetime,
**kwargs: Any,
) -> dict[str, Any]:
urls = kwargs.pop("urls", [])
return {
"id": replay_id,
"replay_type": kwargs.pop("replay_type", "session"),
"project_id": str(project_id),
"urls": urls,
"error_ids": kwargs.pop("error_ids", ["a3a62ef6ac86415b83c2416fc2f76db1"]),
"trace_ids": kwargs.pop("trace_ids", ["4491657243ba4dbebd2f6bd62b733080"]),
"started_at": datetime.datetime.strftime(started_at, "%Y-%m-%dT%H:%M:%S+00:00"),
"finished_at": datetime.datetime.strftime(finished_at, "%Y-%m-%dT%H:%M:%S+00:00"),
"duration": (finished_at - started_at).seconds,
"count_dead_clicks": kwargs.pop("count_dead_clicks", 0),
"count_rage_clicks": kwargs.pop("count_rage_clicks", 0),
"count_segments": kwargs.pop("count_segments", 1),
"count_urls": len(urls),
"platform": kwargs.pop("platform", "javascript"),
"environment": kwargs.pop("environment", "production"),
"releases": kwargs.pop("releases", ["version@1.3"]),
"dist": kwargs.pop("dist", "abc123"),
"os": {
"name": kwargs.pop("os_name", "iOS"),
"version": kwargs.pop("os_version", "16.2"),
},
"browser": {
"name": kwargs.pop("browser_name", "Chrome"),
"version": kwargs.pop("browser_version", "103.0.38"),
},
"device": {
"name": kwargs.pop("device_name", "iPhone 13 Pro"),
"brand": kwargs.pop("device_brand", "Apple"),
"family": kwargs.pop("device_family", "iPhone"),
"model": kwargs.pop("device_model", "13 Pro"),
},
"sdk": {
"name": kwargs.pop("sdk_name", "sentry.javascript.react"),
"version": kwargs.pop("sdk_version", "6.18.1"),
},
"ota_updates": {
"channel": kwargs.pop("ota_updates_channel", "test-channel"),
"runtime_version": kwargs.pop("ota_updates_runtime_version", "test-runtime-version"),
"update_id": kwargs.pop("ota_updates_update_id", "test-update-id"),
},
"user": {
"id": kwargs.pop("user_id", "123"),
"display_name": kwargs.pop("user_display_name", "username"),
"email": kwargs.pop("user_email", "username@example.com"),
"username": kwargs.pop("user_name", "username"),
"ip": kwargs.pop("user_ip", "127.0.0.1"),
"geo": {
"city": kwargs.pop("user_geo_city", "San Francisco"),
"country_code": kwargs.pop("user_geo_country_code", "USA"),
"region": kwargs.pop("user_geo_region", "United States"),
"subdivision": kwargs.pop("user_geo_subdivision", "California"),
},
},
"tags": kwargs.pop("tags", {}),
"activity": kwargs.pop("activity", 0),
"is_archived": kwargs.pop("is_archived", False),
"clicks": kwargs.pop("clicks", []),
"warning_ids": kwargs.pop("warning_ids", []),
"info_ids": kwargs.pop("info_ids", []),
"count_errors": kwargs.pop("count_errors", 0),
"count_warnings": kwargs.pop("count_warnings", 0),
"count_infos": kwargs.pop("count_infos", 0),
"has_viewed": kwargs.pop("has_viewed", False),
}
def mock_replay(
timestamp: datetime.datetime,
project_id: int,
replay_id: str,
**kwargs: Any,
) -> dict[str, Any]:
tags = kwargs.pop("tags", {})
tags.update({"transaction": kwargs.pop("title", "Title")})
tags = [[key, value] for key, value in tags.items()]
return {
"type": "replay_event",
"start_time": sec(timestamp),
"replay_id": replay_id,
"project_id": project_id,
"retention_days": kwargs.pop("retention_days", 30),
"payload": {
"type": "replay_event",
"replay_id": replay_id,
"replay_type": kwargs.pop("replay_type", "session"),
"segment_id": kwargs.pop("segment_id", 0),
"tags": tags,
"urls": kwargs.pop("urls", []),
"is_archived": kwargs.pop("is_archived", None),
"error_ids": kwargs.pop("error_ids", ["a3a62ef6-ac86-415b-83c2-416fc2f76db1"]),
"trace_ids": kwargs.pop("trace_ids", ["44916572-43ba-4dbe-bd2f-6bd62b733080"]),
"dist": kwargs.pop("dist", "abc123"),
"platform": kwargs.pop("platform", "javascript"),
"timestamp": sec(timestamp),
"replay_start_timestamp": kwargs.pop("replay_start_timestamp", sec(timestamp)),
"environment": kwargs.pop("environment", "production"),
"release": kwargs.pop("release", "version@1.3"),
"user": {
"id": kwargs.pop("user_id", "123"),
"username": kwargs.pop("user_name", "username"),
"email": kwargs.pop("user_email", "username@example.com"),
"ip_address": kwargs.pop("ipv4", "127.0.0.1"),
"geo": {
"city": kwargs.pop("user_geo_city", "San Francisco"),
"country_code": kwargs.pop("user_geo_country_code", "USA"),
"region": kwargs.pop("user_geo_region", "United States"),
"subdivision": kwargs.pop("user_geo_subdivision", "California"),
},
},
"sdk": {
"name": kwargs.pop("sdk_name", "sentry.javascript.react"),
"version": kwargs.pop("sdk_version", "6.18.1"),
},
"contexts": {
"trace": {
"op": "pageload",
"span_id": "affa5649681a1eeb",
"trace_id": kwargs.pop("trace_id", "23eda6cd4b174ef8a51f0096df3bfdd1"),
},
"os": {
"name": kwargs.pop("os_name", "iOS"),
"version": kwargs.pop("os_version", "16.2"),
},
"browser": {
"name": kwargs.pop("browser_name", "Chrome"),
"version": kwargs.pop("browser_version", "103.0.38"),
},
"device": {
"name": kwargs.pop("device_name", "iPhone 13 Pro"),
"brand": kwargs.pop("device_brand", "Apple"),
"family": kwargs.pop("device_family", "iPhone"),
"model": kwargs.pop("device_model", "13 Pro"),
},
"ota_updates": {
"channel": kwargs.pop("ota_updates_channel", "test-channel"),
"runtime_version": kwargs.pop(
"ota_updates_runtime_version", "test-runtime-version"
),
"update_id": kwargs.pop("ota_updates_update_id", "test-update-id"),
},
},
"request": {
"url": "Doesn't matter not ingested.",
"headers": {"User-Agent": kwargs.pop("user_agent", "Firefox")},
},
"extra": {},
},
}
def mock_replay_click(
timestamp: datetime.datetime,
project_id: int,
replay_id: str,
**kwargs: Any,
) -> dict[str, Any]:
return {
"type": "replay_event",
"start_time": sec(timestamp),
"replay_id": replay_id,
"project_id": project_id,
"retention_days": kwargs.pop("retention_days", 30),
"payload": {
"type": "replay_actions",
"replay_id": replay_id,
"environment": kwargs.pop("environment", "production"),
"clicks": [
{
"node_id": kwargs["node_id"],
"tag": kwargs["tag"],
"id": kwargs.pop("id", ""),
"class": kwargs.pop("class_", []),
"text": kwargs.pop("text", ""),
"component_name": kwargs.pop("component_name", ""),
"role": kwargs.pop("role", ""),
"alt": kwargs.pop("alt", ""),
"testid": kwargs.pop("testid", ""),
"aria_label": kwargs.pop("aria_label", ""),
"title": kwargs.pop("title", ""),
"is_dead": int(kwargs.pop("is_dead", 0)),
"is_rage": int(kwargs.pop("is_rage", 0)),
"event_hash": str(uuid.uuid4()),
"timestamp": sec(timestamp),
}
],
},
}
def mock_replay_tap(
timestamp: datetime.datetime,
project_id: int,
replay_id: str,
**kwargs: Any,
) -> dict[str, Any]:
return {
"type": "replay_event",
"start_time": sec(timestamp),
"replay_id": replay_id,
"project_id": project_id,
"retention_days": kwargs.pop("retention_days", 30),
"payload": {
"type": "replay_tap",
"replay_id": replay_id,
"environment": kwargs.pop("environment", "production"),
"taps": [
{
"message": kwargs["message"],
"view_class": kwargs.pop("view_class", ""),
"view_id": kwargs.pop("view_id", ""),
"event_hash": str(uuid.uuid4()),
"timestamp": sec(timestamp),
}
],
},
}
def mock_replay_viewed(
timestamp: float,
project_id: int,
replay_id: str,
viewed_by_id: int,
retention_days: int = 30,
) -> dict[str, Any]:
return {
"type": "replay_event",
"start_time": int(timestamp),
"replay_id": replay_id,
"project_id": project_id,
"retention_days": retention_days,
"payload": {
"type": "replay_viewed",
"timestamp": timestamp,
"viewed_by_id": viewed_by_id,
},
}
def mock_segment_init(
timestamp: datetime.datetime,
href: str = "http://localhost/",
width: int = 800,
height: int = 600,
) -> SegmentList:
return [
{
"type": EventType.DomContentLoaded,
"timestamp": ms(timestamp), # rrweb timestamps are in ms
},
{
"type": EventType.Load,
"timestamp": ms(timestamp), # rrweb timestamps are in ms
},
{
"type": EventType.Meta,
"data": {"href": href, "width": width, "height": height},
"timestamp": ms(timestamp), # rrweb timestamps are in ms
},
]
def mock_segment_fullsnapshot(
timestamp: datetime.datetime, bodyChildNodes: list[dict[str, Any]]
) -> SegmentList:
bodyNode = mock_rrweb_node(
tagName="body",
attributes={
"style": 'margin:0; font-family: -apple-system, system-ui, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu;',
},
childNodes=bodyChildNodes,
)
htmlNode = mock_rrweb_node(
tagName="html",
childNodes=[bodyNode],
)
return [
{
"type": EventType.FullSnapshot,
"data": {
"timestamp": ms(timestamp), # rrweb timestamps are in ms
"node": {
"type": EventType.DomContentLoaded,
"childNodes": [htmlNode],
},
},
}
]
def mock_segment_console(timestamp: datetime.datetime) -> SegmentList:
return [
{
"type": EventType.Custom,
"timestamp": ms(timestamp), # rrweb timestamps are in ms
"data": {
"tag": "breadcrumb",
"payload": {
"timestamp": sec(timestamp), # sentry data inside rrweb is in seconds
"type": "default",
"category": "console",
"data": {
"arguments": [
"./src/pages/template/Header.js\n Line 14: The href attribute requires a valid value to be accessible. Provide a valid, navigable address as the href value."
],
"logger": "console",
},
"level": "warning",
"message": "./src/pages/template/Header.js\n Line 14: The href attribute requires a valid value to be accessible. Provide a valid, navigable address as the href value.",
},
},
}
]
def mock_segment_breadcrumb(timestamp: datetime.datetime, payload: dict[str, Any]) -> SegmentList:
    """Wrap *payload* in a single rrweb breadcrumb event.

    Event type 5 is the rrweb custom-event type — presumably the same value
    as EventType.Custom used elsewhere in this module; confirm against the enum.
    """
    event = {
        "type": 5,
        "timestamp": ms(timestamp),  # rrweb timestamps are in ms
        "data": {
            "tag": "breadcrumb",
            "payload": payload,
        },
    }
    return [event]
def mock_segment_nagivation(
    timestamp: datetime.datetime, hrefFrom: str = "/", hrefTo: str = "/profile/"
) -> SegmentList:
    """Return a segment containing one navigation breadcrumb event.

    The payload records the from/to URLs of the navigation; the outer event
    shape comes from ``mock_segment_breadcrumb``.

    Note: this name carries a historical typo ("nagivation"). It is kept so
    existing callers keep working; new call sites should use the correctly
    spelled ``mock_segment_navigation`` alias defined below.
    """
    return mock_segment_breadcrumb(
        timestamp,
        {
            "timestamp": sec(timestamp),  # sentry data inside rrweb is in seconds
            "type": "default",
            "category": "navigation",
            "data": {"from": hrefFrom, "to": hrefTo},
        },
    )


# Backward-compatible, correctly spelled alias for new call sites.
mock_segment_navigation = mock_segment_nagivation
def mock_segment_click(
    timestamp: datetime.datetime, message: str, id: str, tagName: str
) -> SegmentList:
    """Return a segment containing one ui.click breadcrumb event.

    The clicked element is described by its tag name and DOM id.
    """
    payload = {
        "timestamp": sec(timestamp),  # sentry data inside rrweb is in seconds
        "type": "default",
        "category": "ui.click",
        "message": message,
        "data": {
            "node": {
                "tagName": tagName,
                "attributes": {
                    "id": id,
                },
            }
        },
    }
    return mock_segment_breadcrumb(timestamp, payload)
def mock_segment_rageclick(
    timestamp: datetime.datetime, message: str, id: str, tagName: str, clickCount: int
) -> SegmentList:
    """Return a segment containing one slow-click ("rage click") breadcrumb.

    The breadcrumb reports a timeout-terminated slow click on the element
    described by *tagName*/*id*, repeated *clickCount* times.
    """
    payload = {
        "timestamp": sec(timestamp),  # sentry data inside rrweb is in seconds
        "type": "default",
        "category": "ui.slowClickDetected",
        "message": message,
        "data": {
            "endReason": "timeout",
            "timeAfterClickMs": 7000,
            "node": {
                "tagName": tagName,
                "attributes": {
                    "id": id,
                },
            },
            "clickCount": clickCount,
        },
    }
    return mock_segment_breadcrumb(timestamp, payload)
__rrweb_id = 0  # module-level counter backing next_rrweb_id()


def next_rrweb_id() -> int:
    """Return the next unique rrweb node id (1, 2, 3, ...)."""
    global __rrweb_id
    __rrweb_id = __rrweb_id + 1
    return __rrweb_id
def mock_rrweb_node(**kwargs: Any) -> RRWebNode:
    """Build a mock rrweb DOM node.

    With a truthy ``tagName`` an element node (with attributes/children) is
    returned; otherwise a text node carrying ``textContent``.

    NOTE: the default for ``id`` is evaluated eagerly, so the shared counter
    advances even when an explicit ``id`` is supplied — preserved on purpose,
    since changing it would shift every subsequently generated id.
    """
    node_id = kwargs.pop("id", next_rrweb_id())
    tag = kwargs.pop("tagName", None)
    if not tag:
        return {
            "type": EventType.IncrementalSnapshot,
            "id": node_id,
            "textContent": kwargs.pop("textContent", ""),
        }
    return {
        "type": EventType.FullSnapshot,
        "id": node_id,
        "tagName": tag,
        "attributes": kwargs.pop("attributes", {}),
        "childNodes": kwargs.pop("childNodes", []),
    }
def mock_rrweb_div_helloworld() -> RRWebNode:
    """Return a div wrapping a centered <h1>Hello World</h1> heading.

    Nodes are created innermost-first (text, then h1, then div) to keep the
    ids handed out by next_rrweb_id() in the same order as before.
    """
    text = mock_rrweb_node(textContent="Hello World")
    heading = mock_rrweb_node(
        tagName="h1",
        attributes={"style": "text-align: center;"},
        childNodes=[text],
    )
    return mock_rrweb_node(tagName="div", childNodes=[heading])
def mock_replay_event(replay_id="b58a67446c914f44a4e329763420047b", **kwargs):
    """
    Mock a replay event for usage in our recording consumer tests.

    Every field can be overridden via a keyword argument (popped from
    ``kwargs``); anything not overridden falls back to a stable default.
    """
    # Events are timestamped ten minutes in the past.
    timestamp = datetime.datetime.now() - datetime.timedelta(minutes=10)

    # NOTE: a caller-supplied tags dict is mutated in place (update), matching
    # the historical behavior of this helper.
    tags = kwargs.pop("tags", {})
    tags.update({"transaction": kwargs.pop("title", "Title")})
    tag_pairs = [[key, value] for key, value in tags.items()]

    user = {
        "id": kwargs.pop("user_id", "1"),
        "username": kwargs.pop("user_name", "username"),
        "email": kwargs.pop("user_email", "test@test.com"),
        "ip_address": kwargs.pop("ipv4", "127.0.0.1"),
    }
    sdk = {
        "name": kwargs.pop("sdk_name", "sentry.javascript.react"),
        "version": kwargs.pop("sdk_version", "6.18.1"),
    }
    contexts = {
        "os": {
            "name": kwargs.pop("os_name", "iOS"),
            "version": kwargs.pop("os_version", "16.2"),
        },
        "browser": {
            "name": kwargs.pop("browser_name", "Chrome"),
            "version": kwargs.pop("browser_version", "103.0.38"),
        },
        "device": {
            "name": kwargs.pop("device_name", "iPhone 13 Pro"),
            "brand": kwargs.pop("device_brand", "Apple"),
            "family": kwargs.pop("device_family", "iPhone"),
            "model": kwargs.pop("device_model", "13 Pro"),
        },
    }
    return {
        "type": "replay_event",
        "replay_id": replay_id,
        "replay_type": kwargs.pop("replay_type", "session"),
        "segment_id": kwargs.pop("segment_id", 0),
        "tags": tag_pairs,
        "urls": kwargs.pop("urls", []),
        "is_archived": kwargs.pop("is_archived", None),
        "error_ids": kwargs.pop("error_ids", ["a3a62ef6-ac86-415b-83c2-416fc2f76db1"]),
        "trace_ids": kwargs.pop("trace_ids", ["44916572-43ba-4dbe-bd2f-6bd62b733080"]),
        "dist": kwargs.pop("dist", "abc123"),
        "platform": kwargs.pop("platform", "javascript"),
        "timestamp": sec(timestamp),
        "replay_start_timestamp": kwargs.pop("replay_start_timestamp", sec(timestamp)),
        "environment": kwargs.pop("environment", "production"),
        "release": kwargs.pop("release", "version@1.3"),
        "user": user,
        "sdk": sdk,
        "contexts": contexts,
        "request": {
            "url": "Doesn't matter not ingested.",
            "headers": {"User-Agent": kwargs.pop("user_agent", "Firefox")},
        },
        "extra": {},
    }
|
EventType
|
python
|
doocs__leetcode
|
solution/1400-1499/1491.Average Salary Excluding the Minimum and Maximum Salary/Solution.py
|
{
"start": 0,
"end": 156
}
|
class ____:
def average(self, salary: List[int]) -> float:
s = sum(salary) - min(salary) - max(salary)
return s / (len(salary) - 2)
|
Solution
|
python
|
PyCQA__pylint
|
tests/functional/u/useless/useless_object_inheritance.py
|
{
"start": 366,
"end": 427
}
|
class ____(B, object): # [useless-object-inheritance]
pass
|
C
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 383088,
"end": 384982
}
|
class ____(Response):
"""
Response of tasks.set_requirements endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "set_requirements"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(SetRequirementsResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
|
SetRequirementsResponse
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/sorting.py
|
{
"start": 828,
"end": 1042
}
|
class ____(AutoEnum):
"""Defines automation sorting options."""
CREATED_DESC = AutoEnum.auto()
UPDATED_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
|
AutomationSort
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/projects.py
|
{
"start": 133448,
"end": 135985
}
|
class ____(Response):
"""
Response of projects.get_unique_metric_variants endpoint.
:param metrics: A list of metric variants reported for tasks in this project
:type metrics: Sequence[MetricVariantResult]
"""
_service = "projects"
_action = "get_unique_metric_variants"
_version = "2.20"
_schema = {
"definitions": {
"metric_variant_result": {
"properties": {
"metric": {
"description": "Metric name",
"type": ["string", "null"],
},
"metric_hash": {
"description": "Metric name hash. Used instead of the metric name when categorizing last metrics events in task objects.",
"type": ["string", "null"],
},
"variant": {
"description": "Variant name",
"type": ["string", "null"],
},
"variant_hash": {
"description": "Variant name hash. Used instead of the variant name when categorizing last metrics events in task objects.",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"metrics": {
"description": "A list of metric variants reported for tasks in this project",
"items": {"$ref": "#/definitions/metric_variant_result"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, metrics: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetUniqueMetricVariantsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricVariantResult.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", MetricVariantResult, is_array=True)
self._property_metrics = value
|
GetUniqueMetricVariantsResponse
|
python
|
modin-project__modin
|
modin/tests/pandas/extensions/conftest.py
|
{
"start": 1352,
"end": 1423
}
|
class ____(NativeIO):
query_compiler_cls = Test1QueryCompiler
|
Test1IO
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/supervised_learning/decision_tree.py
|
{
"start": 1492,
"end": 7944
}
|
class ____(object):
"""Super class of RegressionTree and ClassificationTree.
Parameters:
-----------
min_samples_split: int
The minimum number of samples needed to make a split when building a tree.
min_impurity: float
The minimum impurity required to split the tree further.
max_depth: int
The maximum depth of a tree.
loss: function
Loss function that is used for Gradient Boosting models to calculate impurity.
"""
def __init__(self, min_samples_split=2, min_impurity=1e-7,
max_depth=float("inf"), loss=None):
self.root = None # Root node in dec. tree
# Minimum n of samples to justify split
self.min_samples_split = min_samples_split
# The minimum impurity to justify split
self.min_impurity = min_impurity
# The maximum depth to grow the tree to
self.max_depth = max_depth
# Function to calculate impurity (classif.=>info gain, regr=>variance reduct.)
self._impurity_calculation = None
# Function to determine prediction of y at leaf
self._leaf_value_calculation = None
# If y is one-hot encoded (multi-dim) or not (one-dim)
self.one_dim = None
# If Gradient Boost
self.loss = loss
def fit(self, X, y, loss=None):
""" Build decision tree """
self.one_dim = len(np.shape(y)) == 1
self.root = self._build_tree(X, y)
self.loss=None
def _build_tree(self, X, y, current_depth=0):
""" Recursive method which builds out the decision tree and splits X and respective y
on the feature of X which (based on impurity) best separates the data"""
largest_impurity = 0
best_criteria = None # Feature index and threshold
best_sets = None # Subsets of the data
# Check if expansion of y is needed
if len(np.shape(y)) == 1:
y = np.expand_dims(y, axis=1)
# Add y as last column of X
Xy = np.concatenate((X, y), axis=1)
n_samples, n_features = np.shape(X)
if n_samples >= self.min_samples_split and current_depth <= self.max_depth:
# Calculate the impurity for each feature
for feature_i in range(n_features):
# All values of feature_i
feature_values = np.expand_dims(X[:, feature_i], axis=1)
unique_values = np.unique(feature_values)
# Iterate through all unique values of feature column i and
# calculate the impurity
for threshold in unique_values:
# Divide X and y depending on if the feature value of X at index feature_i
# meets the threshold
Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold)
if len(Xy1) > 0 and len(Xy2) > 0:
# Select the y-values of the two sets
y1 = Xy1[:, n_features:]
y2 = Xy2[:, n_features:]
# Calculate impurity
impurity = self._impurity_calculation(y, y1, y2)
# If this threshold resulted in a higher information gain than previously
# recorded save the threshold value and the feature
# index
if impurity > largest_impurity:
largest_impurity = impurity
best_criteria = {"feature_i": feature_i, "threshold": threshold}
best_sets = {
"leftX": Xy1[:, :n_features], # X of left subtree
"lefty": Xy1[:, n_features:], # y of left subtree
"rightX": Xy2[:, :n_features], # X of right subtree
"righty": Xy2[:, n_features:] # y of right subtree
}
if largest_impurity > self.min_impurity:
# Build subtrees for the right and left branches
true_branch = self._build_tree(best_sets["leftX"], best_sets["lefty"], current_depth + 1)
false_branch = self._build_tree(best_sets["rightX"], best_sets["righty"], current_depth + 1)
return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[
"threshold"], true_branch=true_branch, false_branch=false_branch)
# We're at leaf => determine value
leaf_value = self._leaf_value_calculation(y)
return DecisionNode(value=leaf_value)
def predict_value(self, x, tree=None):
""" Do a recursive search down the tree and make a prediction of the data sample by the
value of the leaf that we end up at """
if tree is None:
tree = self.root
# If we have a value (i.e we're at a leaf) => return value as the prediction
if tree.value is not None:
return tree.value
# Choose the feature that we will test
feature_value = x[tree.feature_i]
# Determine if we will follow left or right branch
branch = tree.false_branch
if isinstance(feature_value, int) or isinstance(feature_value, float):
if feature_value >= tree.threshold:
branch = tree.true_branch
elif feature_value == tree.threshold:
branch = tree.true_branch
# Test subtree
return self.predict_value(x, branch)
def predict(self, X):
""" Classify samples one by one and return the set of labels """
y_pred = [self.predict_value(sample) for sample in X]
return y_pred
def print_tree(self, tree=None, indent=" "):
""" Recursively print the decision tree """
if not tree:
tree = self.root
# If we're at leaf => print the label
if tree.value is not None:
print (tree.value)
# Go deeper down the tree
else:
# Print test
print ("%s:%s? " % (tree.feature_i, tree.threshold))
# Print the true scenario
print ("%sT->" % (indent), end="")
self.print_tree(tree.true_branch, indent + indent)
# Print the false scenario
print ("%sF->" % (indent), end="")
self.print_tree(tree.false_branch, indent + indent)
|
DecisionTree
|
python
|
google__pytype
|
pytype/abstract/_classes.py
|
{
"start": 26809,
"end": 36180
}
|
class ____( # pytype: disable=signature-mismatch
_base.BaseValue, class_mixin.Class, mixin.NestedAnnotation
):
"""A class that contains additional parameters.
E.g. a container.
Attributes:
base_cls: The base type.
formal_type_parameters: An iterable of BaseValue, one for each type
parameter.
"""
def __init__(
self,
base_cls: PyTDClass | InterpreterClass,
formal_type_parameters: (
abstract_utils.LazyFormalTypeParameters
| dict[str | int, _base.BaseValue]
),
ctx: "context.Context",
template: tuple["_typing.TypeParameter", ...] | None = None,
):
# A ParameterizedClass is created by converting a pytd.GenericType, whose
# base type is restricted to NamedType and ClassType.
self.base_cls = base_cls
super().__init__(base_cls.name, ctx)
self._cls = None # lazily loaded 'cls' attribute
self.module = base_cls.module
# Lazily loaded to handle recursive types.
# See the formal_type_parameters() property.
self._formal_type_parameters = formal_type_parameters
self._formal_type_parameters_loaded = False
self._hash = None # memoized due to expensive computation
if template is None:
self._template = self.base_cls.template
else:
# The ability to create a new template different from the base class's is
# needed for typing.Generic.
self._template = template
self.slots = self.base_cls.slots
self.is_dynamic = self.base_cls.is_dynamic
class_mixin.Class.init_mixin(self, base_cls.cls)
mixin.NestedAnnotation.init_mixin(self)
self._type_param_check()
def __repr__(self) -> str:
return "ParameterizedClass(cls={!r} params={})".format(
self.base_cls, self._formal_type_parameters
)
def _type_param_check(self) -> None:
"""Throw exception for invalid type parameters."""
# It will cause infinite recursion if `formal_type_parameters` is
# `LazyFormalTypeParameters`
if not isinstance(
self._formal_type_parameters, abstract_utils.LazyFormalTypeParameters
):
tparams = datatypes.AliasingDict()
abstract_utils.parse_formal_type_parameters(self, None, tparams)
def get_formal_type_parameters(self):
return {
abstract_utils.full_type_name(self, k): v
for k, v in self.formal_type_parameters.items()
}
def __eq__(self, other: "ParameterizedClass") -> bool:
if isinstance(other, type(self)):
return self.base_cls == other.base_cls and (
self.formal_type_parameters == other.formal_type_parameters
)
return NotImplemented
def __hash__(self):
if self._hash is None:
if isinstance(
self._formal_type_parameters, abstract_utils.LazyFormalTypeParameters
):
items = self._raw_formal_type_parameters()
cache = False
else:
# Use the names of the parameter values to approximate a hash, to avoid
# infinite recursion on recursive type annotations.
items = []
cache = True
for name, val in self.formal_type_parameters.items():
# The 'is not True' check is to prevent us from incorrectly caching
# the hash when val.resolved == LateAnnotation._RESOLVING.
if val.is_late_annotation() and val.resolved is not True: # pylint: disable=g-bool-id-comparison # pytype: disable=attribute-error
cache = False
items.append((name, val.full_name))
hashval = hash((self.base_cls, tuple(items)))
if cache:
self._hash = hashval
else:
hashval = self._hash
return hashval
def __contains__(self, name: _instance_base.SimpleValue) -> bool:
return name in self.base_cls
def _raw_formal_type_parameters(
self,
) -> Generator[tuple[Any, Any] | None, None, None]:
assert isinstance(
self._formal_type_parameters, abstract_utils.LazyFormalTypeParameters
)
parameters = self._formal_type_parameters.parameters
for i, name in enumerate(self._formal_type_parameters.template):
# TODO(rechen): A missing parameter should be an error.
yield name, parameters[i] if i < len(parameters) else None
def get_own_attributes(self):
return self.base_cls.get_own_attributes()
def get_own_abstract_methods(self):
return self.base_cls.get_own_abstract_methods()
@property
def members(self):
return self.base_cls.members
@property
def formal_type_parameters(self) -> dict[str | int, _base.BaseValue]:
self._load_formal_type_parameters()
return self._formal_type_parameters # pytype: disable=bad-return-type
def _load_formal_type_parameters(self) -> None:
if self._formal_type_parameters_loaded:
return
if isinstance(
self._formal_type_parameters, abstract_utils.LazyFormalTypeParameters
):
formal_type_parameters = {}
for name, param in self._raw_formal_type_parameters(): # pytype: disable=attribute-error
if param is None:
formal_type_parameters[name] = self.ctx.convert.unsolvable
else:
formal_type_parameters[name] = self.ctx.convert.constant_to_value(
param, self._formal_type_parameters.subst, self.ctx.root_node
)
self._formal_type_parameters = formal_type_parameters
# Hack: we'd like to evaluate annotations at the currently active node so
# that imports, etc., are visible. The last created node is usually the
# active one.
self._formal_type_parameters = (
self.ctx.annotation_utils.convert_class_annotations(
self.ctx.program.cfg_nodes[-1], self._formal_type_parameters
)
)
self._formal_type_parameters_loaded = True
def compute_mro(self) -> tuple[_base.BaseValue, ...]:
return (self,) + self.base_cls.mro[1:]
def instantiate(
self,
node: cfg.CFGNode,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
):
if self.full_name == "builtins.type":
# deformalize removes TypeVars.
instance = self.ctx.annotation_utils.deformalize(
self.formal_type_parameters[abstract_utils.T]
)
return instance.to_variable(node)
elif self.full_name == "typing.ClassVar":
return self.formal_type_parameters[abstract_utils.T].instantiate(
node, container
)
else:
return self._new_instance(container, node, None).to_variable(node)
@property
def cls(self):
if not self.ctx.converter_minimally_initialized:
return self.ctx.convert.unsolvable
if not self._cls:
self._cls = self.base_cls.cls
return self._cls
@cls.setter
def cls(self, cls) -> None:
self._cls = cls
def set_class(self, node: cfg.CFGNode, var: cfg.Variable) -> None:
self.base_cls.set_class(node, var)
@property
def official_name(self) -> str:
return self.base_cls.official_name
@official_name.setter
def official_name(self, official_name: str):
self.base_cls.official_name = official_name
def _is_callable(self) -> bool:
if not isinstance(self.base_cls, (InterpreterClass, PyTDClass)):
# We don't know how to instantiate this base_cls.
return False
if self.from_annotation:
# A user-provided annotation is always instantiable.
return True
# Otherwise, non-abstract classes are instantiable. The exception is
# typing classes; for example,
# from typing import List
# print(List[str]())
# produces 'TypeError: Type List cannot be instantiated; use list() instead'
# at runtime. However, pytype represents concrete typing classes like List
# with their builtins equivalents, so we can't distinguish between
# List[str]() (illegal) and list[str]() (legal in Python 3.9+); we err on
# the side of allowing such calls.
return not self.is_abstract
def call(
self,
node: cfg.CFGNode,
func: cfg.Binding,
args: function.Args,
alias_map: datatypes.UnionFind | None = None,
) -> tuple[cfg.CFGNode, cfg.Variable]:
if not self._is_callable():
raise error_types.NotCallable(self)
else:
return class_mixin.Class.call(self, node, func, args)
def get_formal_type_parameter(self, t):
return self.formal_type_parameters.get(t, self.ctx.convert.unsolvable)
def get_inner_types(self) -> ItemsView[int | str, _base.BaseValue]:
return self.formal_type_parameters.items()
def update_inner_type(self, key: str | int, typ: _base.BaseValue) -> None:
self.formal_type_parameters[key] = typ
def replace(
self,
inner_types: (
abstract_utils.LazyFormalTypeParameters
| Sequence[tuple[int, _base.BaseValue]]
),
) -> "ParameterizedClass | LiteralClass":
inner_types = dict(inner_types)
if isinstance(self, LiteralClass):
if inner_types == self.formal_type_parameters:
# If the type hasn't changed, we can return a copy of this class.
return LiteralClass(self._instance, self.ctx, self.template)
# Otherwise, we can't create a LiteralClass because we don't have a
# concrete value.
typ = ParameterizedClass
else:
typ = self.__class__
return typ(self.base_cls, inner_types, self.ctx, self.template)
def has_protocol_base(self) -> bool:
return self.base_cls.has_protocol_base()
|
ParameterizedClass
|
python
|
pydata__xarray
|
xarray/groupers.py
|
{
"start": 26178,
"end": 28357
}
|
class ____:
seasons: tuple[str, ...]
# tuple[integer months] corresponding to each season
inds: tuple[tuple[int, ...], ...]
# integer code for each season, this is not simply range(len(seasons))
# when the seasons have overlaps
codes: Sequence[int]
def find_independent_seasons(seasons: Sequence[str]) -> Sequence[SeasonsGroup]:
"""
Iterates though a list of seasons e.g. ["DJF", "FMA", ...],
and splits that into multiple sequences of non-overlapping seasons.
>>> find_independent_seasons(
... ["DJF", "FMA", "AMJ", "JJA", "ASO", "OND"]
... ) # doctest: +NORMALIZE_WHITESPACE
[SeasonsGroup(seasons=('DJF', 'AMJ', 'ASO'), inds=((12, 1, 2), (4, 5, 6), (8, 9, 10)), codes=[0, 2, 4]), SeasonsGroup(seasons=('FMA', 'JJA', 'OND'), inds=((2, 3, 4), (6, 7, 8), (10, 11, 12)), codes=[1, 3, 5])]
>>> find_independent_seasons(["DJF", "MAM", "JJA", "SON"])
[SeasonsGroup(seasons=('DJF', 'MAM', 'JJA', 'SON'), inds=((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)), codes=[0, 1, 2, 3])]
"""
season_inds = season_to_month_tuple(seasons)
grouped = defaultdict(list)
codes = defaultdict(list)
seen: set[tuple[int, ...]] = set()
# This is quadratic, but the number of seasons is at most 12
for i, current in enumerate(season_inds):
# Start with a group
if current not in seen:
grouped[i].append(current)
codes[i].append(i)
seen.add(current)
# Loop through remaining groups, and look for overlaps
for j, second in enumerate(season_inds[i:]):
if not (set(chain(*grouped[i])) & set(second)) and second not in seen:
grouped[i].append(second)
codes[i].append(j + i)
seen.add(second)
if len(seen) == len(seasons):
break
# found all non-overlapping groups for this row start over
grouped_ints = tuple(tuple(idx) for idx in grouped.values() if idx)
return [
SeasonsGroup(seasons=inds_to_season_string(inds), inds=inds, codes=codes)
for inds, codes in zip(grouped_ints, codes.values(), strict=False)
]
@dataclass
|
SeasonsGroup
|
python
|
networkx__networkx
|
networkx/algorithms/bipartite/tests/test_covering.py
|
{
"start": 66,
"end": 1221
}
|
class ____:
"""Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`"""
def test_empty_graph(self):
G = nx.Graph()
assert bipartite.min_edge_cover(G) == set()
def test_graph_single_edge(self):
G = nx.Graph()
G.add_edge(0, 1)
assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)}
def test_bipartite_default(self):
G = nx.Graph()
G.add_nodes_from([1, 2, 3, 4], bipartite=0)
G.add_nodes_from(["a", "b", "c"], bipartite=1)
G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
min_cover = bipartite.min_edge_cover(G)
assert nx.is_edge_cover(G, min_cover)
assert len(min_cover) == 8
def test_bipartite_explicit(self):
G = nx.Graph()
G.add_nodes_from([1, 2, 3, 4], bipartite=0)
G.add_nodes_from(["a", "b", "c"], bipartite=1)
G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching)
assert nx.is_edge_cover(G, min_cover)
assert len(min_cover) == 8
|
TestMinEdgeCover
|
python
|
Netflix__metaflow
|
metaflow/exception.py
|
{
"start": 2693,
"end": 2954
}
|
class ____(MetaflowException):
headline = "Object not in the current namespace"
def __init__(self, namespace):
msg = "Object not in namespace '%s'" % namespace
super(MetaflowNamespaceMismatch, self).__init__(msg)
|
MetaflowNamespaceMismatch
|
python
|
getsentry__sentry
|
tests/sentry/rules/conditions/test_first_seen_event.py
|
{
"start": 244,
"end": 1052
}
|
class ____(RuleTestCase):
rule_cls = FirstSeenEventCondition
def test_applies_correctly(self) -> None:
rule = self.get_rule(rule=Rule(environment_id=None))
self.assertPasses(rule, self.event, is_new=True)
self.assertDoesNotPass(rule, self.event, is_new=False)
def test_applies_correctly_with_environment(self) -> None:
rule = self.get_rule(rule=Rule(environment_id=1))
self.assertPasses(rule, self.event, is_new=True, is_new_group_environment=True)
self.assertPasses(rule, self.event, is_new=False, is_new_group_environment=True)
self.assertDoesNotPass(rule, self.event, is_new=True, is_new_group_environment=False)
self.assertDoesNotPass(rule, self.event, is_new=False, is_new_group_environment=False)
|
FirstSeenEventConditionTest
|
python
|
huggingface__transformers
|
src/transformers/models/grounding_dino/image_processing_grounding_dino_fast.py
|
{
"start": 11412,
"end": 30193
}
|
class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
format = AnnotationFormat.COCO_DETECTION
do_resize = True
do_rescale = True
do_normalize = True
do_pad = True
size = {"shortest_edge": 800, "longest_edge": 1333}
default_to_square = False
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = GroundingDinoImageProcessorKwargs
def __init__(self, **kwargs: Unpack[GroundingDinoImageProcessorKwargs]) -> None:
kwargs.setdefault("do_pad", kwargs.pop("pad_and_return_pixel_mask", self.do_pad))
size = kwargs.pop("size", None)
max_size = None if size is None else kwargs.pop("max_size", 1333)
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
self.size = get_size_dict(size, max_size=max_size, default_to_square=False)
# Backwards compatibility
do_convert_annotations = kwargs.get("do_convert_annotations")
do_normalize = kwargs.get("do_normalize")
if do_convert_annotations is None and getattr(self, "do_convert_annotations", None) is None:
self.do_convert_annotations = do_normalize if do_normalize is not None else self.do_normalize
super().__init__(**kwargs)
def prepare_annotation(
self,
image: torch.Tensor,
target: dict,
format: Optional[AnnotationFormat] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> dict:
"""
Prepare an annotation for feeding into GROUNDING_DINO model.
"""
format = format if format is not None else self.format
if format == AnnotationFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(
image, target, return_segmentation_masks, input_data_format=input_data_format
)
elif format == AnnotationFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image,
target,
masks_path=masks_path,
return_masks=return_segmentation_masks,
input_data_format=input_data_format,
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
def resize(
self,
image: torch.Tensor,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
**kwargs,
) -> torch.Tensor:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
Resampling filter to use if resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.shortest_edge and size.longest_edge:
# Resize the image so that the shortest edge or the longest edge is of the given size
# while maintaining the aspect ratio of the original image.
new_size = get_size_with_aspect_ratio(
image.size()[-2:],
size["shortest_edge"],
size["longest_edge"],
)
elif size.max_height and size.max_width:
new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"])
elif size.height and size.width:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = F.resize(
image,
size=new_size,
interpolation=interpolation,
**kwargs,
)
return image
def resize_annotation(
self,
annotation: dict[str, Any],
orig_size: tuple[int, int],
target_size: tuple[int, int],
threshold: float = 0.5,
interpolation: Optional["F.InterpolationMode"] = None,
):
"""
Resizes an annotation to a target size.
Args:
annotation (`dict[str, Any]`):
The annotation dictionary.
orig_size (`tuple[int, int]`):
The original size of the input image.
target_size (`tuple[int, int]`):
The target size of the image, as returned by the preprocessing `resize` step.
threshold (`float`, *optional*, defaults to 0.5):
The threshold used to binarize the segmentation masks.
resample (`InterpolationMode`, defaults to `F.InterpolationMode.NEAREST_EXACT`):
The resampling filter to use when resizing the masks.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST_EXACT
ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]
new_annotation = {}
new_annotation["size"] = target_size
for key, value in annotation.items():
if key == "boxes":
boxes = value
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device
)
new_annotation["boxes"] = scaled_boxes
elif key == "area":
area = value
scaled_area = area * (ratio_width * ratio_height)
new_annotation["area"] = scaled_area
elif key == "masks":
masks = value[:, None]
masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]
masks = torch.stack(masks).to(torch.float32)
masks = masks[:, 0] > threshold
new_annotation["masks"] = masks
elif key == "size":
new_annotation["size"] = target_size
else:
new_annotation[key] = value
return new_annotation
def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
    """Return a copy of `annotation` with "boxes" converted from absolute corner
    coordinates to center format, normalized by the image width/height. All other
    entries are passed through unchanged."""
    height, width = image_size
    normalized = {}
    for key, value in annotation.items():
        if key != "boxes":
            normalized[key] = value
            continue
        centered = corners_to_center_format(value)
        scale = torch.as_tensor(
            [width, height, width, height], dtype=torch.float32, device=centered.device
        )
        normalized[key] = centered / scale
    return normalized
def _update_annotation_for_padded_image(
    self,
    annotation: dict,
    input_image_size: tuple[int, int],
    output_image_size: tuple[int, int],
    padding,
    update_bboxes,
) -> dict:
    """
    Update the annotation for a padded image.

    Args:
        annotation (`dict`): annotation dictionary to adjust.
        input_image_size (`tuple[int, int]`): (height, width) before padding.
        output_image_size (`tuple[int, int]`): (height, width) after padding.
        padding: the padding values, in the order expected by `F.pad`.
        update_bboxes: whether to rescale normalized boxes onto the padded canvas.
    """
    new_annotation = {}
    new_annotation["size"] = output_image_size
    # input/output ratios (<= 1 when padding enlarges the canvas); applied to
    # normalized box coordinates so they stay relative to the padded size.
    ratio_height, ratio_width = (input / output for output, input in zip(output_image_size, input_image_size))

    for key, value in annotation.items():
        if key == "masks":
            masks = value
            masks = F.pad(
                masks,
                padding,
                fill=0,
            )
            # Drop the channel dim (if present) added during the resize step.
            masks = safe_squeeze(masks, 1)
            new_annotation["masks"] = masks
        elif key == "boxes" and update_bboxes:
            boxes = value
            boxes *= torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], device=boxes.device)
            new_annotation["boxes"] = boxes
        elif key == "size":
            new_annotation["size"] = output_image_size
        else:
            new_annotation[key] = value
    return new_annotation
def pad(
    self,
    image: torch.Tensor,
    padded_size: tuple[int, int],
    annotation: Optional[dict[str, Any]] = None,
    update_bboxes: bool = True,
    fill: int = 0,
):
    """
    Pad `image` on the bottom/right up to `padded_size` and build the matching
    pixel mask (1 = valid pixel, 0 = padding).

    Returns a `(image, pixel_mask, annotation)` triple; the annotation is adjusted
    for the padded canvas when provided.

    Raises:
        ValueError: if `padded_size` is smaller than the image in either dimension.
    """
    original_size = image.size()[-2:]
    padding_bottom = padded_size[0] - original_size[0]
    padding_right = padded_size[1] - original_size[1]
    if padding_bottom < 0 or padding_right < 0:
        raise ValueError(
            f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
            f"original size. Got padded size: {padded_size}, original size: {original_size}."
        )
    if original_size != padded_size:
        # Pad only on the bottom and right, keeping the image anchored top-left.
        padding = [0, 0, padding_right, padding_bottom]
        image = F.pad(image, padding, fill=fill)
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(
                annotation, original_size, padded_size, padding, update_bboxes
            )

    # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
    pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device)
    pixel_mask[: original_size[0], : original_size[1]] = 1

    return image, pixel_mask, annotation
def _preprocess(
    self,
    images: list["torch.Tensor"],
    annotations: Optional[Union[AnnotationType, list[AnnotationType]]],
    masks_path: Optional[Union[str, pathlib.Path]],
    return_segmentation_masks: bool,
    do_resize: bool,
    size: SizeDict,
    interpolation: Optional["F.InterpolationMode"],
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    do_convert_annotations: bool,
    image_mean: Optional[Union[float, list[float]]],
    image_std: Optional[Union[float, list[float]]],
    do_pad: bool,
    pad_size: Optional[SizeDict],
    format: Optional[Union[str, AnnotationFormat]],
    return_tensors: Optional[Union[str, TensorType]],
    **kwargs,
) -> BatchFeature:
    """
    Preprocess an image or a batch of images so that it can be used by the model.

    Pipeline per image: prepare annotation -> optional resize (image + annotation)
    -> fused rescale/normalize -> optional annotation normalization; then an
    optional batch-wide padding pass (padding needs all resized shapes first).
    """
    # Accept a single annotation dict for a single image.
    if annotations is not None and isinstance(annotations, dict):
        annotations = [annotations]

    if annotations is not None and len(images) != len(annotations):
        raise ValueError(
            f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
        )

    format = AnnotationFormat(format)
    if annotations is not None:
        validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

    if (
        masks_path is not None
        and format == AnnotationFormat.COCO_PANOPTIC
        and not isinstance(masks_path, (pathlib.Path, str))
    ):
        raise ValueError(
            "The path to the directory containing the mask PNG files should be provided as a"
            f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
        )

    data = {}
    processed_images = []
    processed_annotations = []
    pixel_masks = []  # Initialize pixel_masks here
    for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
        # prepare (COCO annotations as a list of Dict -> GROUNDING_DINO target as a single Dict per image)
        if annotations is not None:
            annotation = self.prepare_annotation(
                image,
                annotation,
                format,
                return_segmentation_masks=return_segmentation_masks,
                masks_path=masks_path,
                input_data_format=ChannelDimension.FIRST,
            )

        if do_resize:
            resized_image = self.resize(image, size=size, interpolation=interpolation)
            if annotations is not None:
                # Keep the annotation consistent with the resized image geometry.
                annotation = self.resize_annotation(
                    annotation,
                    orig_size=image.size()[-2:],
                    target_size=resized_image.size()[-2:],
                )
            image = resized_image

        # Fused rescale and normalize
        image = self.rescale_and_normalize(image, do_rescale, rescale_factor, do_normalize, image_mean, image_std)

        if do_convert_annotations and annotations is not None:
            annotation = self.normalize_annotation(annotation, get_image_size(image, ChannelDimension.FIRST))

        processed_images.append(image)
        processed_annotations.append(annotation)
    images = processed_images
    annotations = processed_annotations if annotations is not None else None

    if do_pad:
        # depends on all resized image shapes so we need another loop
        if pad_size is not None:
            padded_size = (pad_size.height, pad_size.width)
        else:
            padded_size = get_max_height_width(images)

        padded_images = []
        padded_annotations = []
        for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            if padded_size == image.size()[-2:]:
                # Already at the target size: keep as-is with an all-ones mask.
                padded_images.append(image)
                pixel_masks.append(torch.ones(padded_size, dtype=torch.int64, device=image.device))
                padded_annotations.append(annotation)
                continue
            image, pixel_mask, annotation = self.pad(
                image, padded_size, annotation=annotation, update_bboxes=do_convert_annotations
            )
            padded_images.append(image)
            padded_annotations.append(annotation)
            pixel_masks.append(pixel_mask)
        images = padded_images
        annotations = padded_annotations if annotations is not None else None
        data.update({"pixel_mask": torch.stack(pixel_masks, dim=0)})

    # NOTE: stacking requires uniform shapes; callers are expected to enable
    # do_pad (or pass same-sized images) for batches of mixed sizes.
    data.update({"pixel_values": torch.stack(images, dim=0)})
    encoded_inputs = BatchFeature(data, tensor_type=return_tensors)
    if annotations is not None:
        encoded_inputs["labels"] = [
            BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
        ]
    return encoded_inputs
def post_process_object_detection(
    self,
    outputs: "GroundingDinoObjectDetectionOutput",
    threshold: float = 0.1,
    target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
    """
    Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
    bottom_right_x, bottom_right_y) format.

    Args:
        outputs ([`GroundingDinoObjectDetectionOutput`]):
            Raw outputs of the model.
        threshold (`float`, *optional*, defaults to 0.1):
            Score threshold to keep object detection predictions.
        target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
            Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
            `(height, width)` of each image in the batch. If unset, predictions will not be resized.
    Returns:
        `list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
        - "scores": The confidence scores for each predicted box on the image.
        - "labels": Indexes of the classes predicted by the model on the image.
        - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
    """
    batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
    batch_size = len(batch_logits)

    if target_sizes is not None and len(target_sizes) != batch_size:
        raise ValueError("Make sure that you pass in as many target sizes as images")

    # batch_logits of shape (batch_size, num_queries, num_classes)
    # Take the best-scoring class per query; sigmoid maps its logit to a score.
    batch_class_logits = torch.max(batch_logits, dim=-1)
    batch_scores = torch.sigmoid(batch_class_logits.values)
    batch_labels = batch_class_logits.indices

    # Convert to [x0, y0, x1, y1] format
    batch_boxes = center_to_corners_format(batch_boxes)
    # Convert from relative [0, 1] to absolute [0, height] coordinates
    if target_sizes is not None:
        batch_boxes = _scale_boxes(batch_boxes, target_sizes)

    results = []
    for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
        # Keep only detections above the score threshold.
        keep = scores > threshold
        scores = scores[keep]
        labels = labels[keep]
        boxes = boxes[keep]
        results.append({"scores": scores, "labels": labels, "boxes": boxes})

    return results
__all__ = ["GroundingDinoImageProcessorFast"]
|
GroundingDinoImageProcessorFast
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocolExplicit1.py
|
{
"start": 1737,
"end": 1846
}
|
class ____(Protocol8):
    # Deliberate negative sample for the type checker, per the original note:
    # This should generate an error because x is a ClassVar.
    x: ClassVar = 1
|
Concrete8
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/debug.py
|
{
"start": 41712,
"end": 43089
}
|
class ____(object):
    """
    Wrapper on stdout/stderr that colors text by the current thread ID.

    *stream* must be 'stdout' or 'stderr'.
    """
    # Class-level (shared) state: thread -> assigned color index, guarded by `lock`.
    colors = {}
    lock = Mutex()

    def __init__(self, stream):
        # Keep a reference to the real stream, then install ourselves in its place
        # on the `sys` module so subsequent writes go through this wrapper.
        self.stream = getattr(sys, stream)
        self.err = stream == 'stderr'
        setattr(sys, stream, self)

    def write(self, msg):
        # Serialize writes so colored output from different threads doesn't interleave.
        with self.lock:
            cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)

    def flush(self):
        with self.lock:
            self.stream.flush()

    def color(self):
        # Return the color assigned to the current thread, assigning a new one
        # (cycling through 15 colors, starting at 1) on first use.
        tid = threading.current_thread()
        if tid not in self.colors:
            c = (len(self.colors) % 15) + 1
            self.colors[tid] = c
        return self.colors[tid]
def enableFaulthandler():
    """ Enable faulthandler for all threads.

    If the faulthandler package is available, this function disables and then
    re-enables fault handling for all threads (this is necessary to ensure any
    new threads are handled correctly), and returns True.

    If faulthandler is not available, then returns False.
    """
    try:
        import faulthandler
    except ImportError:
        return False
    # Disable first so that threads started since the previous enable() call
    # are picked up when handling is re-enabled below.
    faulthandler.disable()
    faulthandler.enable(all_threads=True)
    return True
|
ThreadColor
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 19609,
"end": 19930
}
|
class ____(models.Model):
    # Minimal payload model used to exercise history registration with a
    # request-based user getter.
    data = models.CharField(max_length=30)

    def get_absolute_url(self):
        return reverse("bucket_data-detail", kwargs={"pk": self.pk})


# Register the model for history tracking, resolving the acting user from the
# request via the custom getter rather than the default request.user.
register(
    BucketDataRegisterRequestUser,
    user_model=BucketMember,
    get_user=get_bucket_member_request_user,
)
|
BucketDataRegisterRequestUser
|
python
|
pypa__warehouse
|
tests/unit/utils/test_paginate.py
|
{
"start": 4065,
"end": 6755
}
|
class ____:
    """Tests for the OpenSearch pagination wrapper: slicing, clamping to
    max_results, single-use enforcement, and best-guess suggestion handling."""

    def test_slices_and_length(self):
        wrapper = paginate._OpenSearchWrapper(FakeQuery6([1, 2, 3, 4, 5, 6]))
        assert wrapper[1:3] == [2, 3]
        assert len(wrapper) == 6

    def test_slice_start_clamps_to_max(self):
        # A slice starting beyond max_results yields nothing.
        wrapper = paginate._OpenSearchWrapper(FakeQuery6([1, 2, 3, 4, 5, 6]))
        wrapper.max_results = 5
        assert wrapper[6:10] == []
        assert len(wrapper) == 5

    def test_slice_end_clamps_to_max(self):
        # A slice ending beyond max_results is truncated at the cap.
        wrapper = paginate._OpenSearchWrapper(FakeQuery6([1, 2, 3, 4, 5, 6]))
        wrapper.max_results = 5
        assert wrapper[1:10] == [2, 3, 4, 5]
        assert len(wrapper) == 5

    def test_second_slice_fails(self):
        # The wrapper may only be sliced once.
        wrapper = paginate._OpenSearchWrapper(FakeQuery6([1, 2, 3, 4, 5, 6]))
        wrapper[1:3]
        with pytest.raises(RuntimeError):
            wrapper[1:3]

    def test_len_before_slice_fails(self):
        # len() is only available after a slice has executed the query.
        wrapper = paginate._OpenSearchWrapper(FakeQuery6([1, 2, 3, 4, 5, 6]))
        with pytest.raises(RuntimeError):
            len(wrapper)

    def test_best_guess_suggestion(self):
        fake_option = pretend.stub()
        query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], options=[fake_option])
        wrapper = paginate._OpenSearchWrapper(query)
        wrapper[1:3]
        assert wrapper.best_guess == fake_option

    def test_best_guess_suggestion_no_suggestions(self):
        query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], suggestion=[])
        wrapper = paginate._OpenSearchWrapper(query)
        wrapper[1:3]
        assert wrapper.best_guess is None

    def test_best_guess_suggestion_no_options(self):
        query = FakeSuggestQuery([1, 2, 3, 4, 5, 6], options=[])
        wrapper = paginate._OpenSearchWrapper(query)
        wrapper[1:3]
        assert wrapper.best_guess is None
def test_opensearch_page_has_wrapper(monkeypatch):
    # OpenSearchPage should delegate to paginate.Page, injecting the
    # OpenSearch wrapper class and forwarding all other arguments.
    page_obj = pretend.stub()
    page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
    monkeypatch.setattr(paginate, "Page", page_cls)

    assert paginate.OpenSearchPage("first", second="foo") is page_obj
    assert page_cls.calls == [
        pretend.call("first", second="foo", wrapper_class=paginate._OpenSearchWrapper)
    ]
def test_paginate_url(pyramid_request):
    # The URL maker should preserve existing query parameters and append the
    # requested page number when building the route path.
    pyramid_request.GET = MultiDict(pyramid_request.GET)
    pyramid_request.GET["foo"] = "bar"
    url = pretend.stub()
    pyramid_request.current_route_path = pretend.call_recorder(lambda _query: url)

    url_maker = paginate.paginate_url_factory(pyramid_request)

    assert url_maker(5) is url
    assert pyramid_request.current_route_path.calls == [
        pretend.call(_query=[("foo", "bar"), ("page", 5)])
    ]
|
TestOpenSearchWrapper6
|
python
|
TheAlgorithms__Python
|
machine_learning/astar.py
|
{
"start": 1395,
"end": 4181
}
|
class ____:
    """
    Gridworld class represents the external world here a grid M*M
    matrix.
    world_size: create a numpy array with the given world_size default is 5.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit, self.world_y_limit = world_size

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the neighbours of cell
        """
        cx, cy = cell.position
        neighbours = []
        # Visit the 8 surrounding offsets in row-major order, skipping (0, 0).
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                nx, ny = cx + dx, cy + dy
                if 0 <= nx < self.world_x_limit and 0 <= ny < self.world_y_limit:
                    neighbour = Cell()
                    neighbour.position = (nx, ny)
                    neighbour.parent = cell
                    neighbours.append(neighbour)
        return neighbours
def astar(world, start, goal):
    """
    Implementation of a start algorithm.
    world : Object of the world object.
    start : Object of the cell as start position.
    stop  : Object of the cell as goal position.

    >>> p = Gridworld()
    >>> start = Cell()
    >>> start.position = (0,0)
    >>> goal = Cell()
    >>> goal.position = (4,4)
    >>> astar(p, start, goal)
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        # Expand the open node with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # BUGFIX: the original used `continue` inside the inner scan loops,
            # which only skipped an iteration of the scan and never filtered the
            # neighbour — every neighbour was unconditionally re-queued.
            # NOTE(review): these filters rely on Cell.__eq__ comparing positions;
            # with default identity equality they are no-ops, matching the old
            # effective behaviour — verify Cell's equality semantics.
            if any(c == n for c in _closed):
                continue  # already expanded
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared-Euclidean heuristic toward the goal.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if any(c == n and c.f <= n.f for c in _open):
                continue  # an equal-or-better copy is already queued
            _open.append(n)
    # Walk parent links back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons: mark each path cell with 1 in the grid and show it.
    for i in s:
        world.w[i] = 1
    print(world.w)
|
Gridworld
|
python
|
conda__conda
|
tests/test_solvers.py
|
{
"start": 322,
"end": 1203
}
|
class ____(SolverTests):
    """Runs the shared SolverTests suite against conda-libmamba-solver."""

    @property
    def solver_class(self) -> type[Solver]:
        # Imported lazily so the suite can be collected without the plugin installed.
        from conda_libmamba_solver.solver import LibMambaSolver

        return LibMambaSolver

    @property
    def tests_to_skip(self):
        # Maps a skip reason to the test names it applies to.
        return {
            "conda-libmamba-solver does not support features": [
                "test_iopro_mkl",
                "test_iopro_nomkl",
                "test_mkl",
                "test_accelerate",
                "test_scipy_mkl",
                "test_pseudo_boolean",
                "test_no_features",
                "test_surplus_features_1",
                "test_surplus_features_2",
                "test_remove",
                # this one below only fails reliably on windows;
                # it passes Linux on CI, but not locally?
                "test_unintentional_feature_downgrade",
            ],
        }
|
TestLibMambaSolver
|
python
|
wandb__wandb
|
wandb/sdk/launch/agent/config.py
|
{
"start": 856,
"end": 990
}
|
class ____(str, Enum):
    """Enum of valid builder types."""

    # The str mixin makes each member compare equal to (and serialize as) its
    # literal string value.
    docker = "docker"
    kaniko = "kaniko"
    noop = "noop"
|
BuilderType
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 75934,
"end": 76384
}
|
class ____(BaseRequest):
    """ A thread-local subclass of :class:`BaseRequest` with a different
    set of attributes for each thread. There is usually only one global
    instance of this class (:data:`request`). If accessed during a
    request/response cycle, this instance always refers to the *current*
    request (even on a multithreaded server). """
    # Re-binding the request to a new environ is just re-running __init__.
    bind = BaseRequest.__init__
    # The environ is stored thread-locally, so each thread sees its own request.
    environ = _local_property()
|
LocalRequest
|
python
|
huggingface__transformers
|
src/transformers/integrations/executorch.py
|
{
"start": 34870,
"end": 37040
}
|
class ____(torch.nn.Module):
    """
    A wrapper module designed to make a Seq2Seq LM decoder exportable with `torch.export`,
    specifically for use with static caching. This module ensures the exported decoder
    is compatible with ExecuTorch.
    """

    def __init__(self, model, max_static_cache_length, batch_size):
        """
        Args:
            model: full seq2seq LM; only its decoder and lm_head are wrapped here.
            max_static_cache_length: capacity (max sequence length) of the static decoder KV cache.
            batch_size: batch size the cache buffers are pre-allocated for.
        """
        super().__init__()

        # Get the decoder component
        self.decoder = model.get_decoder()
        self.lm_head = model.lm_head
        self.config = model.config

        # Detect the device of the exported models by checking a parameter
        # We'll use the model's device as the target device
        model_device = next(model.parameters()).device

        # Initialize static cache for decoder and DynamicCache for encoder
        self.static_cache = StaticCache(config=self.config, max_cache_len=max_static_cache_length)
        # Fall back to hidden_size / num_attention_heads when head_dim is absent,
        # and to num_attention_heads when the model has no GQA/KV-head setting.
        head_dim = getattr(self.config, "head_dim", self.config.hidden_size // self.config.num_attention_heads)
        num_heads = getattr(self.config, "num_key_value_heads", self.config.num_attention_heads)
        self.static_cache.early_initialization(batch_size, num_heads, head_dim, torch.float32, model_device)
        self.cache = EncoderDecoderCache(self.static_cache, DynamicCache(config=self.config))
        register_dynamic_cache_export_support()

        # Register cache buffers to make them exportable
        for i in range(len(self.static_cache)):
            self.register_buffer(f"key_cache_{i}", self.static_cache.layers[i].keys, persistent=False)
            self.register_buffer(f"value_cache_{i}", self.static_cache.layers[i].values, persistent=False)

    def forward(self, decoder_input_ids, encoder_hidden_states, cache_position):
        """Run one decoding step against the static cache and return LM logits."""
        # Get outputs from decoder
        outputs = self.decoder(
            input_ids=decoder_input_ids,
            encoder_hidden_states=encoder_hidden_states,
            past_key_values=self.cache,
            use_cache=True,
            cache_position=cache_position,
        )

        # Apply language model head
        lm_logits = self.lm_head(outputs[0])

        return lm_logits
|
Seq2SeqLMDecoderExportableModuleWithStaticCache
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_impls.py
|
{
"start": 47083,
"end": 48800
}
|
class ____(
    AutoMaterializeRule,
    NamedTuple("_SkipOnBackfillInProgressRule", [("all_partitions", bool)]),
):
    """Skip rule for assets targeted by an in-progress backfill.

    With ``all_partitions=True`` every candidate partition of a backfilling asset
    is skipped; otherwise only the partitions actually included in the backfill.
    """

    @property
    def decision_type(self) -> AutoMaterializeDecisionType:
        return AutoMaterializeDecisionType.SKIP

    @property
    def description(self) -> str:
        if self.all_partitions:
            return "part of an asset targeted by an in-progress backfill"
        else:
            return "targeted by an in-progress backfill"

    def evaluate_for_asset(self, context: "AutomationContext") -> "AutomationResult":
        # Local import avoids a circular dependency with the automation-condition module.
        from dagster._core.definitions.declarative_automation.automation_condition import (
            AutomationResult,
        )

        backfilling_subset = ValidAssetSubset.coerce_from_subset(
            # this backfilling subset is aware of the current partitions definitions, and so will
            # be valid
            (
                context.legacy_context.instance_queryer.get_active_backfill_target_asset_graph_subset()
            ).get_asset_subset(
                context.legacy_context.asset_key, context.legacy_context.asset_graph
            ),
            context.legacy_context.partitions_def,
        )

        if backfilling_subset.size == 0:
            # No active backfill touches this asset: nothing to skip.
            true_subset = context.legacy_context.empty_subset()
        elif self.all_partitions:
            true_subset = context.legacy_context.candidate_subset
        else:
            true_subset = context.legacy_context.candidate_subset & backfilling_subset

        true_subset = context.asset_graph_view.legacy_get_asset_subset_from_valid_subset(
            true_subset
        )
        return AutomationResult(context, true_subset)
@whitelist_for_serdes
|
SkipOnBackfillInProgressRule
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/live-tests/src/live_tests/commons/models.py
|
{
"start": 8351,
"end": 20150
}
|
class ____:
    """Result of running one connector command (control or target version).

    Bundles the executed container's outputs (stdout/stderr files, optional mitm
    HTTP dump) with helpers to parse Airbyte messages, derive stream schemas, and
    persist artifacts to disk.
    """

    hashed_connection_id: str
    actor_id: str
    configured_catalog: ConfiguredAirbyteCatalog
    connector_under_test: ConnectorUnderTest
    command: Command
    stdout_file_path: Path
    stderr_file_path: Path
    success: bool
    executed_container: Optional[dagger.Container]
    config: Optional[SecretDict]
    http_dump: Optional[dagger.File] = None
    http_flows: list[http.HTTPFlow] = field(default_factory=list)
    stream_schemas: Optional[dict[str, Any]] = None
    backend: Optional[FileBackend] = None
    HTTP_DUMP_FILE_NAME = "http_dump.mitm"
    HAR_FILE_NAME = "http_dump.har"

    @property
    def logger(self) -> logging.Logger:
        # Logger name encodes which version (control/target) and command produced the output.
        return logging.getLogger(f"{self.connector_under_test.target_or_control.value}-{self.command.value}")

    @property
    def airbyte_messages(self) -> Iterable[AirbyteMessage]:
        # Lazily re-parses stdout on every access; each caller gets a fresh iterator.
        return self.parse_airbyte_messages_from_command_output(self.stdout_file_path)

    @property
    def duckdb_schema(self) -> Iterable[str]:
        return (self.connector_under_test.target_or_control.value, self.command.value, self.hashed_connection_id)

    @property
    def configured_streams(self) -> List[str]:
        return [stream.stream.name for stream in self.configured_catalog.streams]

    @property
    def primary_keys_per_stream(self) -> Dict[str, List[str]]:
        return {stream.stream.name: stream.primary_key[0] if stream.primary_key else None for stream in self.configured_catalog.streams}

    @classmethod
    async def load(
        cls: type[ExecutionResult],
        connector_under_test: ConnectorUnderTest,
        hashed_connection_id: str,
        actor_id: str,
        configured_catalog: ConfiguredAirbyteCatalog,
        command: Command,
        stdout_file_path: Path,
        stderr_file_path: Path,
        success: bool,
        executed_container: Optional[dagger.Container],
        config: Optional[SecretDict] = None,
        http_dump: Optional[dagger.File] = None,
    ) -> ExecutionResult:
        """Async constructor: builds the instance then eagerly loads HTTP flows."""
        execution_result = cls(
            hashed_connection_id,
            actor_id,
            configured_catalog,
            connector_under_test,
            command,
            stdout_file_path,
            stderr_file_path,
            success,
            executed_container,
            config,
            http_dump,
        )
        await execution_result.load_http_flows()
        return execution_result

    async def load_http_flows(self) -> None:
        # Export the dagger file to a temp path so mitmproxy can read it.
        if not self.http_dump:
            return
        with tempfile.NamedTemporaryFile() as temp_file:
            await self.http_dump.export(temp_file.name)
            self.http_flows = get_http_flows_from_mitm_dump(Path(temp_file.name))

    def parse_airbyte_messages_from_command_output(
        self, command_output_path: Path, log_validation_errors: bool = False
    ) -> Iterable[AirbyteMessage]:
        # Non-message lines (e.g. plain log output) are silently skipped unless
        # log_validation_errors is set.
        # NOTE(review): logger.warn is deprecated in the stdlib; prefer logger.warning.
        with open(command_output_path) as command_output:
            for line in command_output:
                try:
                    yield AirbyteMessage.parse_raw(line)
                except ValidationError as e:
                    if log_validation_errors:
                        self.logger.warn(f"Error parsing AirbyteMessage: {e}")

    def get_records(self) -> Iterable[AirbyteMessage]:
        """Yield only RECORD messages from the command output."""
        self.logger.info(
            f"Reading records all records for command {self.command.value} on {self.connector_under_test.target_or_control.value} version."
        )
        for message in self.airbyte_messages:
            if message.type is AirbyteMessageType.RECORD:
                yield message

    def generate_stream_schemas(self) -> dict[str, Any]:
        """Infer a JSON schema per stream from the (obfuscated) record values."""
        self.logger.info("Generating stream schemas")
        stream_builders: dict[str, SchemaBuilder] = {}
        for record in self.get_records():
            stream = record.record.stream
            if stream not in stream_builders:
                stream_schema_builder = SchemaBuilder()
                stream_schema_builder.add_schema({"type": "object", "properties": {}})
                stream_builders[stream] = stream_schema_builder
            stream_builders[stream].add_object(self.get_obfuscated_types(record.record.data))
        self.logger.info("Stream schemas generated")
        # Sorted keys keep schema output deterministic across runs.
        return {stream: sort_dict_keys(stream_builders[stream].to_schema()) for stream in stream_builders}

    @staticmethod
    def get_obfuscated_types(data: dict[str, Any]) -> dict[str, Any]:
        """
        Convert obfuscated records into a record whose values have the same type as the original values.
        """
        # Obfuscated values are strings prefixed with their original type name;
        # map each prefix to a representative value of that type.
        types = {}
        for k, v in data.items():
            if v.startswith("string_"):
                types[k] = "a"
            elif v.startswith("integer_"):
                types[k] = 0
            elif v.startswith("number_"):
                types[k] = 0.1
            elif v.startswith("boolean_"):
                types[k] = True
            elif v.startswith("null_"):
                types[k] = None
            elif v.startswith("array_"):
                types[k] = []
            elif v.startswith("object_"):
                types[k] = {}
            else:
                types[k] = v
        return types

    def get_records_per_stream(self, stream: str) -> Iterator[AirbyteMessage]:
        """Yield RECORD messages for one stream from the backend's per-stream file."""
        assert self.backend is not None, "Backend must be set to get records per stream"
        self.logger.info(f"Reading records for stream {stream}")
        if stream not in self.backend.record_per_stream_paths:
            self.logger.warning(f"No records found for stream {stream}")
            yield from []
        else:
            for message in self.parse_airbyte_messages_from_command_output(
                self.backend.record_per_stream_paths[stream], log_validation_errors=True
            ):
                if message.type is AirbyteMessageType.RECORD:
                    yield message

    def get_states_per_stream(self, stream: str) -> Dict[str, List[AirbyteStateMessage]]:
        # NOTE(review): the `stream` argument is only used for logging — the
        # returned dict contains state messages for ALL streams; verify callers.
        self.logger.info(f"Reading state messages for stream {stream}")
        states = defaultdict(list)
        for message in self.airbyte_messages:
            if message.type is AirbyteMessageType.STATE:
                states[message.state.stream.stream_descriptor.name].append(message.state)
        return states

    def get_status_messages_per_stream(self, stream: str) -> Dict[str, List[AirbyteStreamStatusTraceMessage]]:
        # NOTE(review): as above, `stream` is only logged; all streams are returned.
        self.logger.info(f"Reading state messages for stream {stream}")
        statuses = defaultdict(list)
        for message in self.airbyte_messages:
            if message.type is AirbyteMessageType.TRACE and message.trace.type == TraceType.STREAM_STATUS:
                statuses[message.trace.stream_status.stream_descriptor.name].append(message.trace.stream_status)
        return statuses

    @cache
    def get_message_count_per_type(self) -> dict[AirbyteMessageType, int]:
        # NOTE(review): @cache on an instance method keys on self and keeps the
        # instance alive for the cache's lifetime (relies on __hash__ below).
        message_count: dict[AirbyteMessageType, int] = defaultdict(int)
        for message in self.airbyte_messages:
            message_count[message.type] += 1
        return message_count

    async def save_http_dump(self, output_dir: Path) -> None:
        """Export the mitm dump (if any) to output_dir and convert it to a HAR file."""
        if self.http_dump:
            self.logger.info("An http dump was captured during the execution of the command, saving it.")
            http_dump_file_path = (output_dir / self.HTTP_DUMP_FILE_NAME).resolve()
            await self.http_dump.export(str(http_dump_file_path))
            self.logger.info(f"Http dump saved to {http_dump_file_path}")

            # Define where the har file will be saved
            har_file_path = (output_dir / self.HAR_FILE_NAME).resolve()

            # Convert the mitmproxy dump file to a har file
            mitm_http_stream_to_har(http_dump_file_path, har_file_path)
            self.logger.info(f"Har file saved to {har_file_path}")
        else:
            self.logger.warning("No http dump to save")

    def save_airbyte_messages(self, output_dir: Path, duckdb_path: Optional[Path] = None) -> None:
        """Persist all Airbyte messages, via DuckDB when a path is given, else flat files."""
        self.logger.info("Saving Airbyte messages to disk")
        airbyte_messages_dir = output_dir / "airbyte_messages"
        airbyte_messages_dir.mkdir(parents=True, exist_ok=True)
        if duckdb_path:
            self.backend = DuckDbBackend(airbyte_messages_dir, duckdb_path, self.duckdb_schema)
        else:
            self.backend = FileBackend(airbyte_messages_dir)
        self.backend.write(self.airbyte_messages)
        self.logger.info("Airbyte messages saved")

    def save_stream_schemas(self, output_dir: Path) -> None:
        self.stream_schemas = self.generate_stream_schemas()
        stream_schemas_dir = output_dir / "stream_schemas"
        stream_schemas_dir.mkdir(parents=True, exist_ok=True)
        for stream_name, stream_schema in self.stream_schemas.items():
            (stream_schemas_dir / f"{sanitize_stream_name(stream_name)}.json").write_text(json.dumps(stream_schema, sort_keys=True))
        self.logger.info("Stream schemas saved to disk")

    async def save_artifacts(self, output_dir: Path, duckdb_path: Optional[Path] = None) -> None:
        """Save messages, updated config, HTTP dump, and schemas in one pass.

        Order matters: messages must be saved first so the backend (and its
        control-message path, used by update_configuration) exists.
        """
        self.logger.info("Saving artifacts to disk")
        self.save_airbyte_messages(output_dir, duckdb_path)
        self.update_configuration()
        await self.save_http_dump(output_dir)
        self.save_stream_schemas(output_dir)
        self.logger.info("All artifacts saved to disk")

    def get_updated_configuration(self, control_message_path: Path) -> Optional[dict[str, Any]]:
        """Iterate through the control messages to find CONNECTOR_CONFIG message and return the last updated configuration."""
        if not control_message_path.exists():
            return None

        updated_config = None
        for line in control_message_path.read_text().splitlines():
            if line.strip():
                connector_config = json.loads(line.strip()).get("connectorConfig", {})
                if connector_config:
                    updated_config = connector_config

        return updated_config

    def update_configuration(self) -> None:
        """This function checks if a configuration has to be updated by reading the control messages file.
        If a configuration has to be updated, it updates the configuration on the actor using the Airbyte API.
        """
        assert self.backend is not None, "Backend must be set to update configuration in order to find the control messages path"
        updated_configuration = self.get_updated_configuration(self.backend.jsonl_controls_path)
        if updated_configuration is None:
            return

        self.logger.warning(f"Updating configuration for {self.connector_under_test.name}, actor {self.actor_id}")
        url = f"https://api.airbyte.com/v1/{self.connector_under_test.actor_type.value}s/{self.actor_id}"

        payload = {
            "configuration": {
                **updated_configuration,
                f"{self.connector_under_test.actor_type.value}Type": self.connector_under_test.name_without_type_prefix,
            }
        }
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {get_airbyte_api_key()}",
        }

        response = requests.patch(url, json=payload, headers=headers)
        try:
            response.raise_for_status()
        except requests.HTTPError as e:
            # Best-effort: log the failure and fall through to the success log below.
            self.logger.error(f"Failed to update {self.connector_under_test.name} configuration on actor {self.actor_id}: {e}")
            self.logger.error(f"Response: {response.text}")
        self.logger.info(f"Updated configuration for {self.connector_under_test.name}, actor {self.actor_id}")

    def __hash__(self):
        # Hash by connector version only — required for @cache on methods above.
        return hash(self.connector_under_test.version)
@dataclass(kw_only=True)
|
ExecutionResult
|
python
|
google__jax
|
tests/pallas/pallas_test.py
|
{
"start": 42702,
"end": 42766
}
|
class ____(ApiErrorTest):
    # Re-run the whole ApiErrorTest suite with INTERPRET enabled
    # (presumably Pallas interpret mode — behavior inherited from the base class).
    INTERPRET = True
|
ApiErrorInterpretTest
|
python
|
modin-project__modin
|
modin/core/dataframe/algebra/default2pandas/groupby.py
|
{
"start": 21929,
"end": 22811
}
|
class ____(GroupBy):
    """Builder for GroupBy aggregation functions for Series."""

    @classmethod
    def _call_groupby(cls, df, *args, **kwargs):  # noqa: PR01
        """Call .groupby() on passed `df` squeezed to Series."""
        # We can end up here by two means - either by "true" call
        # like Series().groupby() or by df.groupby()[item].
        if len(df.columns) == 1:
            # Series().groupby() case
            return df.squeeze(axis=1).groupby(*args, **kwargs)
        # In second case surrounding logic will supplement grouping columns,
        # so we need to drop them after grouping is over; our originally
        # selected column is always the first, so use it
        with warnings.catch_warnings():
            # Suppress pandas' FutureWarning triggered by column selection on a groupby.
            warnings.filterwarnings("ignore", category=FutureWarning)
            return df.groupby(*args, **kwargs)[df.columns[0]]
|
SeriesGroupBy
|
python
|
astropy__astropy
|
astropy/io/votable/converters.py
|
{
"start": 27424,
"end": 27611
}
|
class ____(Integer):
    """
    Handles the unsignedByte datatype. Unsigned 8-bit integer.
    """

    format = "u1"  # numpy dtype code for an unsigned 8-bit integer
    val_range = (0, 255)  # inclusive range of representable values
    bit_size = "8-bit unsigned"  # human-readable size label
|
UnsignedByte
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/release_set_commits.py
|
{
"start": 82,
"end": 308
}
|
class ____(analytics.Event):
    # Analytics event payload; optional fields default to None so the event can
    # be emitted without a user or user agent.
    user_id: int | None = None
    organization_id: int
    project_ids: list[int] | None
    user_agent: str | None = None


# Register the event class with the analytics subsystem so it can be emitted.
analytics.register(ReleaseSetCommitsLocalEvent)
|
ReleaseSetCommitsLocalEvent
|
python
|
django__django
|
django/forms/fields.py
|
{
"start": 36555,
"end": 41957
}
|
class ____(Field):
"""
Aggregate the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
"invalid": _("Enter a list of values."),
"incomplete": _("Enter a complete value."),
}
def __init__(self, fields, *, require_all_fields=True, **kwargs):
self.require_all_fields = require_all_fields
super().__init__(**kwargs)
for f in fields:
f.error_messages.setdefault("incomplete", self.error_messages["incomplete"])
if self.disabled:
f.disabled = True
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super().__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validate every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if self.disabled and not isinstance(value, list):
value = self.widget.decompress(value)
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(
self.error_messages["required"], code="required"
)
else:
return self.compress([])
else:
raise ValidationError(self.error_messages["invalid"], code="invalid")
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(
self.error_messages["required"], code="required"
)
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages["incomplete"] not in errors:
errors.append(field.error_messages["incomplete"])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Return a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError("Subclasses must implement this method.")
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = ["" for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
|
MultiValueField
|
python
|
tornadoweb__tornado
|
tornado/test/netutil_test.py
|
{
"start": 3540,
"end": 3988
}
|
class ____(_ResolverErrorTestMixin):
def setUp(self):
super().setUp()
self.resolver = BlockingResolver()
self.real_getaddrinfo = socket.getaddrinfo
socket.getaddrinfo = _failing_getaddrinfo
def tearDown(self):
socket.getaddrinfo = self.real_getaddrinfo
super().tearDown()
@skipIfNoNetwork
@unittest.skipIf(sys.platform == "win32", "preexec_fn not available on win32")
|
ThreadedResolverErrorTest
|
python
|
HIPS__autograd
|
autograd/misc/tracers.py
|
{
"start": 183,
"end": 1663
}
|
class ____(Node):
__slots__ = ["parents", "partial_fun"]
def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
args = subvals(args, zip(parent_argnums, repeat(None)))
def partial_fun(partial_args):
return fun(*subvals(args, zip(parent_argnums, partial_args)), **kwargs)
self.parents = parents
self.partial_fun = partial_fun
def initialize_root(self):
self.parents = []
def const_graph_unary(fun):
graph = []
_fun = [fun] # Allow fun to be freed, since it may have bound args
def maybe_cached_fun(x):
if graph:
_graph = graph[0]
vals = {_graph[0]: x}
for node in _graph[1:]:
vals[node] = node.partial_fun([vals[p] for p in node.parents])
return vals[node]
else:
start_node = ConstGraphNode.new_root()
end_value, end_node = trace(start_node, _fun.pop(), x)
if end_node is None:
raise Exception("Output is independent of input")
graph.append(list(toposort(end_node))[::-1])
return end_value
return maybe_cached_fun
def const_graph(fun, *args, **kwargs):
partial_fun = partial(fun, *args, **kwargs)
unary_fun = lambda args: partial_fun(*args)
maybe_cached_unary_fun = const_graph_unary(unary_fun)
@wraps(fun)
def _fun(*args):
return maybe_cached_unary_fun(args)
return _fun
|
ConstGraphNode
|
python
|
scipy__scipy
|
scipy/stats/_new_distributions.py
|
{
"start": 418,
"end": 4805
}
|
class ____(ContinuousDistribution):
r"""Normal distribution with prescribed mean and standard deviation.
The probability density function of the normal distribution is:
.. math::
f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp {
\left( -\frac{1}{2}\left( \frac{x - \mu}{\sigma} \right)^2 \right)}
"""
# `ShiftedScaledDistribution` allows this to be generated automatically from
# an instance of `StandardNormal`, but the normal distribution is so frequently
# used that it's worth a bit of code duplication to get better performance.
_mu_domain = _RealInterval(endpoints=(-inf, inf))
_sigma_domain = _RealInterval(endpoints=(0, inf))
_x_support = _RealInterval(endpoints=(-inf, inf))
_mu_param = _RealParameter('mu', symbol=r'\mu', domain=_mu_domain,
typical=(-1, 1))
_sigma_param = _RealParameter('sigma', symbol=r'\sigma', domain=_sigma_domain,
typical=(0.5, 1.5))
_x_param = _RealParameter('x', domain=_x_support, typical=(-1, 1))
_parameterizations = [_Parameterization(_mu_param, _sigma_param)]
_variable = _x_param
_normalization = 1/np.sqrt(2*np.pi)
_log_normalization = np.log(2*np.pi)/2
def __new__(cls, mu=None, sigma=None, **kwargs):
if mu is None and sigma is None:
return super().__new__(StandardNormal)
return super().__new__(cls)
def __init__(self, *, mu=0., sigma=1., **kwargs):
super().__init__(mu=mu, sigma=sigma, **kwargs)
def _logpdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._logpdf_formula(self, (x - mu)/sigma) - np.log(sigma)
def _pdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._pdf_formula(self, (x - mu)/sigma) / sigma
def _logcdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._logcdf_formula(self, (x - mu)/sigma)
def _cdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._cdf_formula(self, (x - mu)/sigma)
def _logccdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._logccdf_formula(self, (x - mu)/sigma)
def _ccdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._ccdf_formula(self, (x - mu)/sigma)
def _icdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._icdf_formula(self, x) * sigma + mu
def _ilogcdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._ilogcdf_formula(self, x) * sigma + mu
def _iccdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._iccdf_formula(self, x) * sigma + mu
def _ilogccdf_formula(self, x, *, mu, sigma, **kwargs):
return StandardNormal._ilogccdf_formula(self, x) * sigma + mu
def _entropy_formula(self, *, mu, sigma, **kwargs):
return StandardNormal._entropy_formula(self) + np.log(abs(sigma))
def _logentropy_formula(self, *, mu, sigma, **kwargs):
lH0 = StandardNormal._logentropy_formula(self)
with np.errstate(divide='ignore'):
# sigma = 1 -> log(sigma) = 0 -> log(log(sigma)) = -inf
# Silence the unnecessary runtime warning
lls = np.log(np.log(abs(sigma))+0j)
return special.logsumexp(np.broadcast_arrays(lH0, lls), axis=0)
def _median_formula(self, *, mu, sigma, **kwargs):
return mu
def _mode_formula(self, *, mu, sigma, **kwargs):
return mu
def _moment_raw_formula(self, order, *, mu, sigma, **kwargs):
if order == 0:
return np.ones_like(mu)
elif order == 1:
return mu
else:
return None
_moment_raw_formula.orders = [0, 1] # type: ignore[attr-defined]
def _moment_central_formula(self, order, *, mu, sigma, **kwargs):
if order == 0:
return np.ones_like(mu)
elif order % 2:
return np.zeros_like(mu)
else:
# exact is faster (and obviously more accurate) for reasonable orders
return sigma**order * special.factorial2(int(order) - 1, exact=True)
def _sample_formula(self, full_shape, rng, *, mu, sigma, **kwargs):
return rng.normal(loc=mu, scale=sigma, size=full_shape)[()]
def _log_diff(log_p, log_q):
return special.logsumexp([log_p, log_q+np.pi*1j], axis=0)
|
Normal
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/monitoring.py
|
{
"start": 14590,
"end": 16679
}
|
class ____(object):
"""A context manager to measure the walltime and increment a Counter cell."""
__slots__ = [
"cell",
"t",
"monitored_section_name",
"_counting",
"_avoid_repetitive_counting",
]
def __init__(
self, cell, monitored_section_name=None, avoid_repetitive_counting=False
):
"""Creates a new MonitoredTimer.
Args:
cell: the cell associated with the time metric that will be inremented.
monitored_section_name: name of action being monitored here.
avoid_repetitive_counting: when set to True, if already in a monitored
timer section with the same monitored_section_name, skip counting.
"""
self.cell = cell
self.monitored_section_name = monitored_section_name
self._avoid_repetitive_counting = avoid_repetitive_counting
self._counting = True
def __enter__(self):
if (
self._avoid_repetitive_counting
and self.monitored_section_name
and self.monitored_section_name in MonitoredTimerSections
):
self._counting = False
return self
self.t = time.time()
if self.monitored_section_name:
MonitoredTimerSections.append(self.monitored_section_name)
return self
def __exit__(self, exception_type, exception_value, traceback):
del exception_type, exception_value, traceback
if self._counting:
micro_seconds = (time.time() - self.t) * 1000000
self.cell.increase_by(int(micro_seconds))
if self.monitored_section_name:
MonitoredTimerSections.remove(self.monitored_section_name)
def monitored_timer(cell):
"""A function decorator for adding MonitoredTimer support.
Args:
cell: the cell associated with the time metric that will be inremented.
Returns:
A decorator that measure the function runtime and increment the specified
counter cell.
"""
def actual_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with MonitoredTimer(cell):
return func(*args, **kwargs)
return wrapper
return actual_decorator
|
MonitoredTimer
|
python
|
openai__openai-python
|
src/openai/resources/fine_tuning/checkpoints/permissions.py
|
{
"start": 15738,
"end": 16223
}
|
class ____:
def __init__(self, permissions: AsyncPermissions) -> None:
self._permissions = permissions
self.create = _legacy_response.async_to_raw_response_wrapper(
permissions.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
permissions.retrieve,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
permissions.delete,
)
|
AsyncPermissionsWithRawResponse
|
python
|
automl__auto-sklearn
|
test/test_pipeline/test_regression.py
|
{
"start": 1125,
"end": 27908
}
|
class ____(unittest.TestCase):
_multiprocess_can_split_ = True
def test_io_dict(self):
regressors = regression_components._regressors
for r in regressors:
if regressors[r] == regression_components.RegressorChoice:
continue
props = regressors[r].get_properties()
self.assertIn("input", props)
self.assertIn("output", props)
inp = props["input"]
output = props["output"]
self.assertIsInstance(inp, tuple)
self.assertIsInstance(output, tuple)
for i in inp:
self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))
self.assertEqual(output, (PREDICTIONS,))
self.assertIn("handles_regression", props)
self.assertTrue(props["handles_regression"])
self.assertIn("handles_classification", props)
self.assertIn("handles_multiclass", props)
self.assertIn("handles_multilabel", props)
self.assertIn("handles_multioutput", props)
self.assertFalse(props["handles_classification"])
self.assertFalse(props["handles_multiclass"])
self.assertFalse(props["handles_multilabel"])
def test_find_regressors(self):
regressors = regression_components._regressors
self.assertGreaterEqual(len(regressors), 1)
for key in regressors:
if hasattr(regressors[key], "get_components"):
continue
self.assertIn(AutoSklearnRegressionAlgorithm, regressors[key].__bases__)
def test_find_preprocessors(self):
preprocessors = preprocessing_components._preprocessors
self.assertGreaterEqual(len(preprocessors), 1)
for key in preprocessors:
if hasattr(preprocessors[key], "get_components"):
continue
self.assertIn(
AutoSklearnPreprocessingAlgorithm, preprocessors[key].__bases__
)
def test_configurations(self):
cs = SimpleRegressionPipeline().get_hyperparameter_search_space()
self._test_configurations(cs)
def test_configurations_signed_data(self):
dataset_properties = {"signed": True}
cs = SimpleRegressionPipeline(
dataset_properties=dataset_properties
).get_hyperparameter_search_space()
self._test_configurations(
configurations_space=cs, dataset_properties=dataset_properties
)
def test_configurations_sparse(self):
dataset_properties = {"sparse": True}
cs = SimpleRegressionPipeline(
dataset_properties=dataset_properties
).get_hyperparameter_search_space()
self._test_configurations(
cs, make_sparse=True, dataset_properties=dataset_properties
)
def test_multioutput(self):
cache = Memory(location=tempfile.gettempdir())
cached_func = cache.cache(sklearn.datasets.make_regression)
X, Y = cached_func(
n_samples=250,
n_features=20,
n_informative=9,
n_targets=4,
bias=0.5,
effective_rank=10,
tail_strength=0.4,
noise=0.3,
shuffle=True,
coef=False,
random_state=1,
)
X_train = X[:200, :]
Y_train = Y[:200, :]
X_test = X[200:, :]
Y_test = Y[200:, :]
data = {
"X_train": X_train,
"Y_train": Y_train,
"X_test": X_test,
"Y_test": Y_test,
}
dataset_properties = {"multioutput": True}
pipeline = SimpleRegressionPipeline(dataset_properties=dataset_properties)
cs = pipeline.get_hyperparameter_search_space()
self._test_configurations(cs, data=data, dataset_properties=dataset_properties)
def _test_configurations(
self,
configurations_space,
make_sparse=False,
data=None,
dataset_properties=None,
):
# Use a limit of ~4GiB
limit = 3072 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
configurations_space.seed(1)
for i in range(10):
config = configurations_space.sample_configuration()
config._populate_values()
# Restrict configurations which could take too long on travis-ci
restrictions = {
"regressor:adaboost:n_estimators": 50,
"regressor:adaboost:max_depth": 1,
"feature_preprocessor:kernel_pca:n_components": 10,
"feature_preprocessor:kitchen_sinks:n_components": 50,
"regressor:libsvm_svc:degree": 2,
"regressor:libsvm_svr:degree": 2,
"regressor:libsvm_svr:C": 1.0,
"feature_preprocessor:truncatedSVD:target_dim": 10,
"feature_preprocessor:polynomial:degree": 2,
"regressor:lda:n_components": 10,
}
for restrict_parameter in restrictions:
restrict_to = restrictions[restrict_parameter]
if (
restrict_parameter in config
and config[restrict_parameter] is not None
):
config._values[restrict_parameter] = restrict_to
if data is None:
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="boston", make_sparse=make_sparse, add_NaNs=True
)
else:
X_train = data["X_train"].copy()
Y_train = data["Y_train"].copy()
X_test = data["X_test"].copy()
data["Y_test"].copy()
cls = SimpleRegressionPipeline(
random_state=1, dataset_properties=dataset_properties
)
cls.set_hyperparameters(config)
# First make sure that for this configuration, setting the parameters
# does not mistakenly set the estimator as fitted
for name, step in cls.named_steps.items():
with self.assertRaisesRegex(
sklearn.exceptions.NotFittedError, "instance is not fitted yet"
):
check_is_fitted(step)
try:
with ignore_warnings(regressor_warnings):
cls.fit(X_train, Y_train)
# After fit, all components should be tagged as fitted
# by sklearn. Check is fitted raises an exception if that
# is not the case
try:
for name, step in cls.named_steps.items():
check_is_fitted(step)
except sklearn.exceptions.NotFittedError:
self.fail(
"config={} raised NotFittedError unexpectedly!".format(config)
)
cls.predict(X_test)
except MemoryError:
continue
except np.linalg.LinAlgError:
continue
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in e.args[0]:
continue
elif "removed all features" in e.args[0]:
continue
elif "all features are discarded" in e.args[0]:
continue
elif "Numerical problems in QDA" in e.args[0]:
continue
elif "Bug in scikit-learn" in e.args[0]:
continue
elif (
"The condensed distance matrix must contain only finite "
"values." in e.args[0]
):
continue
elif (
"zero-size array to reduction operation maximum which has no "
"identity" in e.args[0]
):
continue
else:
e.args += (f"config={config}",)
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
elif "invalid value encountered in multiply" in e.args[0]:
continue
else:
e.args += (f"config={config}",)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
e.args += (f"config={config}",)
raise e
except Exception as e:
if (
"Multiple input features cannot have the same target value"
in e.args[0]
):
continue
else:
e.args += (f"config={config}",)
raise e
def test_default_configuration(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(dataset="diabetes")
auto = SimpleRegressionPipeline(random_state=1)
auto = auto.fit(X_train, Y_train)
predictions = auto.predict(copy.deepcopy(X_test))
# The lower the worse
r2_score = sklearn.metrics.r2_score(Y_test, predictions)
self.assertAlmostEqual(0.3458397471855429, r2_score, places=2)
model_score = auto.score(copy.deepcopy(X_test), Y_test)
self.assertAlmostEqual(model_score, r2_score, places=5)
def test_default_configuration_iterative_fit(self):
regressor = SimpleRegressionPipeline(
random_state=1,
include={
"regressor": ["random_forest"],
"feature_preprocessor": ["no_preprocessing"],
},
)
X_train, Y_train, X_test, Y_test = get_dataset(dataset="boston")
regressor.fit_transformer(X_train, Y_train)
for i in range(1, 11):
regressor.iterative_fit(X_train, Y_train)
self.assertEqual(regressor.steps[-1][-1].choice.estimator.n_estimators, i)
def test_repr(self):
representation = repr(SimpleRegressionPipeline())
cls = eval(representation)
self.assertIsInstance(cls, SimpleRegressionPipeline)
def test_get_hyperparameter_search_space(self):
cs = SimpleRegressionPipeline().get_hyperparameter_search_space()
self.assertIsInstance(cs, ConfigurationSpace)
conditions = cs.get_conditions()
hyperparameters = cs.get_hyperparameters()
forbiddens = cs.get_forbiddens()
self.assertEqual(167, len(hyperparameters))
self.assertEqual(len(hyperparameters) - 3, len(conditions))
self.assertEqual(len(forbiddens), 35)
def test_get_hyperparameter_search_space_include_exclude_models(self):
regressor = SimpleRegressionPipeline(include={"regressor": ["random_forest"]})
cs = regressor.get_hyperparameter_search_space()
self.assertEqual(
cs.get_hyperparameter("regressor:__choice__"),
CategoricalHyperparameter("regressor:__choice__", ["random_forest"]),
)
# TODO add this test when more than one regressor is present
regressor = SimpleRegressionPipeline(exclude={"regressor": ["random_forest"]})
cs = regressor.get_hyperparameter_search_space()
self.assertNotIn("random_forest", str(cs))
regressor = SimpleRegressionPipeline(include={"feature_preprocessor": ["pca"]})
cs = regressor.get_hyperparameter_search_space()
self.assertEqual(
cs.get_hyperparameter("feature_preprocessor:__choice__"),
CategoricalHyperparameter("feature_preprocessor:__choice__", ["pca"]),
)
regressor = SimpleRegressionPipeline(
exclude={"feature_preprocessor": ["no_preprocessing"]}
)
cs = regressor.get_hyperparameter_search_space()
self.assertNotIn("no_preprocessing", str(cs))
def test_get_hyperparameter_search_space_preprocessor_contradicts_default(
self,
):
regressor = SimpleRegressionPipeline(
include={"feature_preprocessor": ["densifier"]},
dataset_properties={"sparse": True},
)
cs = regressor.get_hyperparameter_search_space()
self.assertEqual(
cs.get_hyperparameter("regressor:__choice__").default_value,
"gradient_boosting",
)
regressor = SimpleRegressionPipeline(
include={"feature_preprocessor": ["nystroem_sampler"]}
)
cs = regressor.get_hyperparameter_search_space()
self.assertEqual(
cs.get_hyperparameter("regressor:__choice__").default_value, "sgd"
)
def test_get_hyperparameter_search_space_only_forbidden_combinations(self):
self.assertRaisesRegex(
ValueError,
"Cannot find a legal default configuration.",
SimpleRegressionPipeline,
include={
"regressor": ["random_forest"],
"feature_preprocessor": ["kitchen_sinks"],
},
)
# It must also be catched that no classifiers which can handle sparse
# data are located behind the densifier
self.assertRaisesRegex(
ValueError,
"Cannot find a legal default configuration",
SimpleRegressionPipeline,
include={
"regressor": ["extra_trees"],
"feature_preprocessor": ["densifier"],
},
dataset_properties={"sparse": True},
)
@unittest.skip(
"test_get_hyperparameter_search_space_dataset_properties"
+ " Not yet Implemented"
)
def test_get_hyperparameter_search_space_dataset_properties(self):
# TODO: We do not have any dataset properties for regression, so this
# test is somewhat stupid
pass
"""
full_cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
cs_mc = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertEqual(full_cs, cs_mc)
cs_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertNotIn('k_nearest_neighbors', str(cs_ml))
self.assertNotIn('liblinear', str(cs_ml))
self.assertNotIn('libsvm_svc', str(cs_ml))
self.assertNotIn('sgd', str(cs_ml))
cs_sp = SimpleRegressionPipeline.get_hyperparameter_search_space(
sparse=True)
self.assertNotIn('extra_trees', str(cs_sp))
self.assertNotIn('gradient_boosting', str(cs_sp))
self.assertNotIn('random_forest', str(cs_sp))
cs_mc_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertEqual(cs_ml, cs_mc_ml)
self.assertRaisesRegex(ValueError,
"No regressor to build a configuration space "
"for...", SimpleRegressionPipeline.
get_hyperparameter_search_space,
multiclass=True, multilabel=True, sparse=True)
"""
def test_predict_batched(self):
include = {"regressor": ["decision_tree"]}
cs = SimpleRegressionPipeline(include=include).get_hyperparameter_search_space()
default = cs.get_default_configuration()
regressor = SimpleRegressionPipeline(
config=default, random_state=1, include=include
)
X_train, Y_train, X_test, Y_test = get_dataset(dataset="boston")
regressor.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = regressor.predict(X_test_)
mock_predict = unittest.mock.Mock(wraps=regressor.steps[-1][-1].predict)
regressor.steps[-1][-1].predict = mock_predict
prediction = regressor.predict(X_test, batch_size=20)
self.assertEqual((356,), prediction.shape)
self.assertEqual(18, mock_predict.call_count)
np.testing.assert_array_almost_equal(prediction_, prediction)
def test_predict_batched_sparse(self):
dataset_properties = {"sparse": True}
include = {"regressor": ["decision_tree"]}
cs = SimpleRegressionPipeline(
dataset_properties=dataset_properties, include=include
).get_hyperparameter_search_space()
default = cs.get_default_configuration()
regressor = SimpleRegressionPipeline(
config=default,
random_state=1,
dataset_properties=dataset_properties,
include=include,
)
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="boston", make_sparse=True
)
regressor.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = regressor.predict(X_test_)
mock_predict = unittest.mock.Mock(wraps=regressor.steps[-1][-1].predict)
regressor.steps[-1][-1].predict = mock_predict
prediction = regressor.predict(X_test, batch_size=20)
self.assertEqual((356,), prediction.shape)
self.assertEqual(18, mock_predict.call_count)
np.testing.assert_array_almost_equal(prediction_, prediction)
@unittest.skip("test_check_random_state Not yet Implemented")
def test_check_random_state(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_X Not yet Implemented")
def test_validate_input_X(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_Y Not yet Implemented")
def test_validate_input_Y(self):
raise NotImplementedError()
def test_pipeline_clonability(self):
X_train, Y_train, X_test, Y_test = get_dataset(dataset="boston")
auto = SimpleRegressionPipeline(random_state=1)
auto = auto.fit(X_train, Y_train)
auto_clone = clone(auto)
auto_clone_params = auto_clone.get_params()
# Make sure all keys are copied properly
for k, v in auto.get_params().items():
self.assertIn(k, auto_clone_params)
# Make sure the params getter of estimator are honored
klass = auto.__class__
new_object_params = auto.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
self.assertEqual(param1, param2)
def test_set_params(self):
pass
def test_get_params(self):
pass
def _test_set_hyperparameter_choice(
self, expected_key, implementation, config_dict
):
"""
Given a configuration in config, this procedure makes sure that
the given implementation, which should be a Choice component, honors
the type of the object, and any hyperparameter associated to it
"""
keys_checked = [expected_key]
implementation_type = config_dict[expected_key]
expected_type = implementation.get_components()[implementation_type]
self.assertIsInstance(implementation.choice, expected_type)
# Are there further hyperparams?
# A choice component might have attribute requirements that we need to check
expected_sub_key = (
expected_key.replace(":__choice__", ":") + implementation_type
)
expected_attributes = {}
if "data_preprocessor:__choice__" in expected_key:
# We have to check both the numerical and categorical
to_check = {
"numerical_transformer": implementation.choice.numer_ppl.named_steps,
"categorical_transformer": implementation.choice.categ_ppl.named_steps,
"text_transformer": implementation.choice.txt_ppl.named_steps,
}
for data_type, pipeline in to_check.items():
for sub_name, sub_step in pipeline.items():
# If it is a Choice, make sure it is the correct one!
if isinstance(sub_step, AutoSklearnChoice):
key = "data_preprocessor:feature_type:{}:{}:__choice__".format(
data_type, sub_name
)
keys_checked.extend(
self._test_set_hyperparameter_choice(
key, sub_step, config_dict
)
)
# If it is a component, make sure it has the correct hyperparams
elif isinstance(sub_step, AutoSklearnComponent):
keys_checked.extend(
self._test_set_hyperparameter_component(
"data_preprocessor:feature_type:{}:{}".format(
data_type, sub_name
),
sub_step,
config_dict,
)
)
else:
raise ValueError("New type of pipeline component!")
return keys_checked
else:
for key, value in config_dict.items():
if key != expected_key and expected_sub_key in key:
expected_attributes[key.split(":")[-1]] = value
keys_checked.append(key)
if expected_attributes:
attributes = vars(implementation.choice)
# Cannot check the whole dictionary, just names, as some
# classes map the text hyperparameter directly to a function!
for expected_attribute in expected_attributes.keys():
self.assertIn(expected_attribute, attributes.keys())
return keys_checked
def _test_set_hyperparameter_component(
self, expected_key, implementation, config_dict
):
"""
Given a configuration in config, this procedure makes sure that
the given implementation, which should be a autosklearn component, honors
is created with the desired hyperparameters stated in config_dict
"""
keys_checked = []
attributes = vars(implementation)
expected_attributes = {}
for key, value in config_dict.items():
if expected_key in key:
keys_checked.append(key)
key = key.replace(expected_key + ":", "")
if ":" in key:
raise ValueError(
"This utility should only be called with a "
"matching string that produces leaf configurations, "
"that is no further colons are expected, yet key={}"
"".format(key)
)
expected_attributes[key] = value
# Cannot check the whole dictionary, just names, as some
# classes map the text hyperparameter directly to a function!
for expected_attribute in expected_attributes.keys():
self.assertIn(expected_attribute, attributes.keys())
return keys_checked
def test_set_hyperparameters_honors_configuration(self):
"""Makes sure that a given configuration is honored in practice.
This method tests that the set hyperparameters actually create objects
that comply with the given configuration. It iterates trough the pipeline to
make sure we did not miss a step, but also checks at the end that every
configuration from Config was checked
Also considers random_state and ensures pipeline steps correctly recieve
the right random_state
"""
all_combinations = list(itertools.product([True, False], repeat=4))
for (
sparse,
multilabel,
signed,
multiclass,
) in all_combinations:
dataset_properties = {
"sparse": sparse,
"multilabel": multilabel,
"multiclass": multiclass,
"signed": signed,
}
random_state = 1
auto = SimpleRegressionPipeline(
random_state=random_state,
dataset_properties=dataset_properties,
)
cs = auto.get_hyperparameter_search_space()
config = cs.sample_configuration()
# Set hyperparameters takes a given config and translate
# a config to an actual implementation
auto.set_hyperparameters(config)
config_dict = config.get_dictionary()
# keys to check is our mechanism to ensure that every
# every config key is checked
keys_checked = []
for name, step in auto.named_steps.items():
if name == "data_preprocessor":
keys_checked.extend(
self._test_set_hyperparameter_choice(
"data_preprocessor:__choice__", step, config_dict
)
)
self.assertEqual(step.random_state, random_state)
elif name == "feature_preprocessor":
keys_checked.extend(
self._test_set_hyperparameter_choice(
"feature_preprocessor:__choice__", step, config_dict
)
)
self.assertEqual(step.random_state, random_state)
elif name == "regressor":
keys_checked.extend(
self._test_set_hyperparameter_choice(
"regressor:__choice__", step, config_dict
)
)
self.assertEqual(step.random_state, random_state)
else:
raise ValueError(
"Found another type of step! Need to update this check"
" {}. ".format(name)
)
# Make sure we checked the whole configuration
self.assertSetEqual(set(config_dict.keys()), set(keys_checked))
|
SimpleRegressionPipelineTest
|
python
|
django__django
|
tests/signals/tests.py
|
{
"start": 18155,
"end": 21115
}
|
class ____(SimpleTestCase):
async def test_asend(self):
sync_handler = SyncHandler()
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(sync_handler)
signal.connect(async_handler)
result = await signal.asend(self.__class__)
self.assertEqual(result, [(sync_handler, 1), (async_handler, 1)])
def test_send(self):
sync_handler = SyncHandler()
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(sync_handler)
signal.connect(async_handler)
result = signal.send(self.__class__)
self.assertEqual(result, [(sync_handler, 1), (async_handler, 1)])
def test_send_robust(self):
class ReceiverException(Exception):
pass
receiver_exception = ReceiverException()
async def failing_async_handler(**kwargs):
raise receiver_exception
sync_handler = SyncHandler()
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(failing_async_handler)
signal.connect(async_handler)
signal.connect(sync_handler)
result = signal.send_robust(self.__class__)
# The ordering here is different than the order that signals were
# connected in.
self.assertEqual(
result,
[
(sync_handler, 1),
(failing_async_handler, receiver_exception),
(async_handler, 1),
],
)
async def test_asend_robust(self):
class ReceiverException(Exception):
pass
receiver_exception = ReceiverException()
async def failing_async_handler(**kwargs):
raise receiver_exception
sync_handler = SyncHandler()
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(failing_async_handler)
signal.connect(async_handler)
signal.connect(sync_handler)
result = await signal.asend_robust(self.__class__)
# The ordering here is different than the order that signals were
# connected in.
self.assertEqual(
result,
[
(sync_handler, 1),
(failing_async_handler, receiver_exception),
(async_handler, 1),
],
)
async def test_asend_only_async_receivers(self):
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(async_handler)
result = await signal.asend(self.__class__)
self.assertEqual(result, [(async_handler, 1)])
async def test_asend_robust_only_async_receivers(self):
async_handler = AsyncHandler()
signal = dispatch.Signal()
signal.connect(async_handler)
result = await signal.asend_robust(self.__class__)
self.assertEqual(result, [(async_handler, 1)])
|
AsyncReceiversTests
|
python
|
mlflow__mlflow
|
tests/tracing/test_fluent.py
|
{
"start": 3699,
"end": 3959
}
|
class ____:
@mlflow.trace()
async def predict(self, x, y):
return await self.some_operation_raise_error(x, y)
@mlflow.trace()
async def some_operation_raise_error(self, x, y):
raise ValueError("Some error")
|
ErroringAsyncTestModel
|
python
|
wandb__wandb
|
wandb/sdk/data_types/_dtypes.py
|
{
"start": 10930,
"end": 11332
}
|
class ____(Type):
"""An object with an unknown type.
All assignments to an UnknownType result in the type of the assigned object except
`None` which results in a InvalidType.
"""
name = "unknown"
types: t.ClassVar[t.List[type]] = []
def assign_type(self, wb_type: "Type") -> "Type":
return wb_type if not isinstance(wb_type, NoneType) else InvalidType()
|
UnknownType
|
python
|
sphinx-doc__sphinx
|
sphinx/directives/other.py
|
{
"start": 10770,
"end": 13121
}
|
class ____(SphinxDirective):
"""Directive to only include text if the given tag(s) are enabled."""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
node = addnodes.only()
node.document = self.state.document
self.set_source_info(node)
node['expr'] = self.arguments[0]
# Same as util.nested_parse_with_titles but try to handle nested
# sections which should be raised higher up the doctree.
memo: Any = self.state.memo
surrounding_title_styles = memo.title_styles
surrounding_section_level = memo.section_level
memo.title_styles = []
memo.section_level = 0
try:
self.state.nested_parse(
self.content, self.content_offset, node, match_titles=True
)
title_styles = memo.title_styles
if (
not surrounding_title_styles
or not title_styles
or title_styles[0] not in surrounding_title_styles
or not self.state.parent
):
# No nested sections so no special handling needed.
return [node]
# Calculate the depths of the current and nested sections.
current_depth = 0
parent = self.state.parent
while parent:
current_depth += 1
parent = parent.parent
current_depth -= 2
title_style = title_styles[0]
nested_depth = len(surrounding_title_styles)
if title_style in surrounding_title_styles:
nested_depth = surrounding_title_styles.index(title_style)
# Use these depths to determine where the nested sections should
# be placed in the doctree.
n_sects_to_raise = current_depth - nested_depth + 1
parent = cast('nodes.Element', self.state.parent)
for _i in range(n_sects_to_raise):
if parent.parent:
parent = parent.parent
parent.append(node)
return []
finally:
memo.title_styles = surrounding_title_styles
memo.section_level = surrounding_section_level
|
Only
|
python
|
google__jax
|
tests/errors_test.py
|
{
"start": 2572,
"end": 12232
}
|
class ____(jtu.JaxTestCase):
def test_nested_jit(self, filter_mode):
@jit
def innermost(x):
assert False
@jit
def inbetween(x):
return 1 + innermost(x)
@jit
def outermost(x):
return 2 + inbetween(x)
f = lambda: outermost(jnp.array([1, 2]))
check_filtered_stack_trace(self, AssertionError, f, [
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + innermost(x)'),
('innermost', 'assert False')],
filter_mode=filter_mode)
def test_nested_jit_and_vmap(self, filter_mode):
@jit
def innermost(x):
assert False
@jit
def inbetween(x):
return 1 + vmap(innermost)(x)
@jit
def outermost(x):
return 2 + inbetween(x)
f = lambda: outermost(jnp.array([1, 2]))
check_filtered_stack_trace(self, AssertionError, f, [
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + vmap(innermost)(x)'),
('innermost', 'assert False')],
filter_mode=filter_mode)
def test_nested_jit_and_grad(self, filter_mode):
@jit
def innermost(x):
assert False
@jit
def inbetween(x):
return 1 + grad(innermost)(x)
@jit
def outermost(x):
return 2 + inbetween(x)
f = lambda: outermost(jnp.array([1, 2]))
check_filtered_stack_trace(self, TypeError, f, [
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + grad(innermost)(x)'),
], filter_mode=filter_mode)
def test_lax_cond(self, filter_mode):
def err(_):
assert False
return ()
def f():
return lax.cond(True, err, lambda _: (), ())
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.cond(True, err, lambda _: (), ())'),
('err', 'assert False')],
filter_mode=filter_mode)
def test_lax_switch(self, filter_mode):
def err(_):
assert False
return ()
def f():
branches = [lambda _: (), err, lambda _: ()]
return lax.switch(1, branches, ())
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.switch(1, branches, ())'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_scan(self, filter_mode):
def err(*_):
assert False
return ()
def f():
return lax.scan(err, (), (), 3)
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.scan(err, (), (), 3)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_fori_loop(self, filter_mode):
def err(*_):
assert False
return ()
def f():
return lax.fori_loop(0, 3, err, ())
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.fori_loop(0, 3, err, ())'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_while_loop(self, filter_mode):
def err(*_):
assert False
return ()
def f():
pred = lambda _: False
return lax.while_loop(pred, err, ())
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.while_loop(pred, err, ())'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_map(self, filter_mode):
def err(_):
assert False
return ()
def f():
xs = jnp.ones(3)
return lax.map(err, xs)
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.map(err, xs)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_custom_root(self, filter_mode):
def err(*_):
assert False
return ()
def g(x): return (x - 1.) ** 2.
def solve(*_): return 1.
def f1():
return lax.custom_root(g, 0., err, solve)
def f2():
return lax.custom_root(g, 0., solve, err)
def f3():
return lax.custom_root(err, 0., solve, solve)
check_filtered_stack_trace(self, AssertionError, f1, [
('f1', 'return lax.custom_root(g, 0., err, solve)'),
('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f2, [
('f2', 'return lax.custom_root(g, 0., solve, err)'),
('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f3, [
('f3', 'return lax.custom_root(err, 0., solve, solve)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_custom_linear_solve(self, filter_mode):
def err(*_):
assert False
return ()
matvec = lambda v: v
solve = lambda mv, b: 1.
b = 1.
def f1():
return lax.custom_linear_solve(err, b, solve)
def f2():
return lax.custom_linear_solve(matvec, b, err)
check_filtered_stack_trace(self, AssertionError, f1, [
('f1', 'return lax.custom_linear_solve(err, b, solve)'),
('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f2, [
('f2', 'return lax.custom_linear_solve(matvec, b, err)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_lax_associative_scan(self, filter_mode):
def err(*_):
assert False
return ()
def f():
xs = jnp.arange(4.)
return lax.associative_scan(err, xs)
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.associative_scan(err, xs)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_custom_jvp(self, filter_mode):
def err(*args):
assert False
return args
@jax.custom_jvp
def f(x):
return err(x)
@f.defjvp
def f_jvp(x, tx):
x = err(x)
return x, tx
check_filtered_stack_trace(self, AssertionError, lambda: f(1.), [
('f', 'return err(x)'),
('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, lambda: jax.jvp(f, [1.], [1.]), [
('f_jvp', 'x = err(x)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_custom_vjp(self, filter_mode):
def err(*args):
assert False
return args[0]
@jax.custom_vjp
def f(x):
return err(x)
def fwd(x):
return x, ()
def fwd_err(x):
x = err(x)
return x, ()
def bwd(_, g):
return (g,)
def bwd_err(_, g):
g = err(g)
return (g,)
f.defvjp(fwd_err, bwd)
check_filtered_stack_trace(self, AssertionError, lambda: f(1.), [
('f', 'return err(x)'),
('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, lambda: jax.grad(f)(1.), [
('fwd_err', 'x = err(x)'),
('err', 'assert False')], filter_mode=filter_mode)
f.defvjp(fwd, bwd_err)
check_filtered_stack_trace(self, AssertionError, lambda: jax.grad(f)(1.), [
('bwd_err', 'g = err(g)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_jvp(self, filter_mode):
def err(_):
assert False
return ()
def f():
p = (1.,)
t = (0.,)
return jax.jvp(err, p, t)
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return jax.jvp(err, p, t)'),
('err', 'assert False')], filter_mode=filter_mode)
def test_vjp(self, filter_mode):
def err(_):
assert False
return ()
def f():
x = 1.
return jax.vjp(err, x)[0]
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return jax.vjp(err, x)[0]'),
('err', 'assert False')], filter_mode=filter_mode)
def test_debug_nans(self, filter_mode):
@jax.jit
def f(x):
return 0. / x
f(2.)
def g():
return f(0.)
with jax.debug_nans(True):
check_filtered_stack_trace(self, ZeroDivisionError, g, [
('g', 'return f(0.)'),
('f', 'return 0. / x')], filter_mode=filter_mode)
def test_cause_chain(self, filter_mode):
@jit
def inner(x):
raise ValueError('inner')
@jit
def outer(x):
try:
inner(x)
except ValueError as e:
raise TypeError('outer') from e
f = lambda: outer(1.)
check_filtered_stack_trace(self, TypeError, f, [
('<lambda>', 'f = lambda: outer'),
('outer', 'raise TypeError')], filter_mode=filter_mode)
e = get_exception(TypeError, f) # Uses the default JAX_TRACEBACK_FILTERING=auto
assert any("For simplicity" in x for x in e.__notes__)
self.assertIsInstance(e.__cause__, ValueError)
def test_null_traceback(self, filter_mode):
class TestA: pass
def f(a): return a + 1
def err():
a = TestA()
return jit(f)(a)
check_filtered_stack_trace(self, TypeError, err, [
('err', 'return jit(f)(a)')], filter_mode=filter_mode)
def test_api_boundary_does_not_add_to_garbage(self, filter_mode):
self.enter_context(config.traceback_filtering(filter_mode))
self.enter_context(disable_gc())
class MyObject:
def __call__(self):
f()
@traceback_util.api_boundary
def f():
g()
@traceback_util.api_boundary
def g():
raise ValueError('f')
o = MyObject()
weak_o = weakref.ref(o)
try:
o()
except ValueError:
pass
del o
self.assertIsNone(weak_o())
@contextlib.contextmanager
def disable_gc():
gc.disable()
gc.collect()
try:
yield
finally:
gc.enable()
@jtu.with_config(jax_traceback_filtering='auto') # JaxTestCase defaults to off.
|
FilteredTracebackTest
|
python
|
huggingface__transformers
|
src/transformers/models/resnet/modeling_resnet.py
|
{
"start": 2907,
"end": 3561
}
|
class ____(nn.Module):
"""
ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
|
ResNetShortCut
|
python
|
sqlalchemy__sqlalchemy
|
examples/generic_associations/table_per_association.py
|
{
"start": 2165,
"end": 3049
}
|
class ____(HasAddresses, Base):
company_name: Mapped[str]
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = Session(engine)
session.add_all(
[
Customer(
name="customer 1",
addresses=[
Address(
street="123 anywhere street", city="New York", zip="10110"
),
Address(
street="40 main street", city="San Francisco", zip="95732"
),
],
),
Supplier(
company_name="Ace Hammers",
addresses=[
Address(street="2569 west elm", city="Detroit", zip="56785")
],
),
]
)
session.commit()
for customer in session.query(Customer):
for address in customer.addresses:
print(address)
# no parent here
|
Supplier
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1122596,
"end": 1124004
}
|
class ____(sgqlc.types.Type, UniformResourceLocatable, Node):
"""Represents a mention made by one issue or pull request to another."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "is_cross_repository", "referenced_at", "source", "target", "will_close_target")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository")
"""Reference originated in a different repository."""
referenced_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="referencedAt")
"""Identifies when the reference was made."""
source = sgqlc.types.Field(sgqlc.types.non_null("ReferencedSubject"), graphql_name="source")
"""Issue or pull request that made the reference."""
target = sgqlc.types.Field(sgqlc.types.non_null("ReferencedSubject"), graphql_name="target")
"""Issue or pull request to which the reference was made."""
will_close_target = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="willCloseTarget")
"""Checks if the target will be closed when the source is merged."""
|
CrossReferencedEvent
|
python
|
apache__airflow
|
providers/snowflake/tests/unit/snowflake/operators/test_snowflake.py
|
{
"start": 2294,
"end": 2965
}
|
class ____:
@mock.patch("airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator.get_db_hook")
def test_snowflake_operator(self, mock_get_db_hook, dag_maker):
sql = """
CREATE TABLE IF NOT EXISTS test_airflow (
dummy VARCHAR(50)
);
"""
with dag_maker(TEST_DAG_ID):
operator = SQLExecuteQueryOperator(
task_id="basic_snowflake", sql=sql, do_xcom_push=False, conn_id="snowflake_default"
)
# do_xcom_push=False because otherwise the XCom test will fail due to the mocking (it actually works)
dag_maker.run_ti(operator.task_id)
|
TestSnowflakeOperator
|
python
|
pytransitions__transitions
|
transitions/extensions/asyncio.py
|
{
"start": 3082,
"end": 3942
}
|
class ____(Condition):
"""A helper class to await condition checks in the intended way."""
async def check(self, event_data):
"""Check whether the condition passes.
Args:
event_data (EventData): An EventData instance to pass to the
condition (if event sending is enabled) or to extract arguments
from (if event sending is disabled). Also contains the data
model attached to the current machine which is used to invoke
the condition.
"""
func = event_data.machine.resolve_callable(self.func, event_data)
res = func(event_data) if event_data.machine.send_event else func(*event_data.args, **event_data.kwargs)
if inspect.isawaitable(res):
return await res == self.target
return res == self.target
|
AsyncCondition
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/matchClass2.py
|
{
"start": 130,
"end": 474
}
|
class ____:
optional: int | None = field(default=None, kw_only=True)
x: int
y: int
obj = Point(1, 2)
match obj:
case Point(x, y, optional=opt):
reveal_type(x, expected_text="int")
reveal_type(y, expected_text="int")
reveal_type(opt, expected_text="int | None")
distance = (x**2 + y**2) ** 0.5
|
Point
|
python
|
getsentry__sentry
|
src/sentry/models/commitauthor.py
|
{
"start": 1045,
"end": 2965
}
|
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
# display name
name = models.CharField(max_length=128, null=True)
email = models.CharField(max_length=200)
# Format varies by provider:
# - GitHub/GitHub Enterprise: "github:username", "github_enterprise:username"
# - Other providers: null
# - Legacy data(?): integer (rare)
external_id = models.CharField(max_length=164, null=True)
objects: ClassVar[CommitAuthorManager] = CommitAuthorManager()
class Meta:
app_label = "sentry"
db_table = "sentry_commitauthor"
unique_together = (("organization_id", "email"), ("organization_id", "external_id"))
__repr__ = sane_repr("organization_id", "email", "name")
users: list[RpcUser] | None = None
def preload_users(self) -> list[RpcUser]:
self.users = None
self.users = self.find_users()
return self.users
def find_users(self) -> list[RpcUser]:
from sentry.models.organizationmember import OrganizationMember
from sentry.users.services.user.service import user_service
if self.users is not None:
return self.users
users = user_service.get_many_by_email(emails=[self.email], is_verified=True)
org_member_user_ids = set(
OrganizationMember.objects.filter(
organization_id=self.organization_id, user_id__in={u.id for u in users}
).values_list("user_id", flat=True)
)
return [u for u in users if u.id in org_member_user_ids]
def get_username_from_external_id(self) -> str | None:
"""
Note: only works for GitHub and GitHub Enterprise
"""
return (
self.external_id.split(":", 1)[1]
if self.external_id and ":" in self.external_id
else None
)
|
CommitAuthor
|
python
|
PrefectHQ__prefect
|
src/prefect/logging/highlighters.py
|
{
"start": 730,
"end": 1187
}
|
class ____(RegexHighlighter):
"""Apply style to names."""
base_style = "name."
highlights: list[str] = [
# ?i means case insensitive
# ?<= means find string right after the words: flow run
r"(?i)(?P<flow_run_name>(?<=flow run) \'(.*?)\')",
r"(?i)(?P<flow_name>(?<=flow) \'(.*?)\')",
r"(?i)(?P<task_run_name>(?<=task run) \'(.*?)\')",
r"(?i)(?P<task_name>(?<=task) \'(.*?)\')",
]
|
NameHighlighter
|
python
|
getsentry__sentry
|
src/sentry/api/validators/auth.py
|
{
"start": 99,
"end": 739
}
|
class ____(serializers.Serializer):
password = serializers.CharField(required=False, trim_whitespace=False)
# For u2f
challenge = serializers.CharField(required=False, trim_whitespace=False)
response = serializers.CharField(required=False, trim_whitespace=False)
def validate(self, data):
if "password" in data:
return data
if "challenge" in data and "response" in data:
return data
raise serializers.ValidationError(
detail="You must provide `password` or `challenge` and `response`.",
code=MISSING_PASSWORD_OR_U2F_CODE,
)
|
AuthVerifyValidator
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/class_interval.py
|
{
"start": 5298,
"end": 5350
}
|
class ____(A13):
def g(self):
return 0
|
B13
|
python
|
kamyu104__LeetCode-Solutions
|
Python/cut-off-trees-for-golf-event.py
|
{
"start": 114,
"end": 1984
}
|
class ____(object):
def cutOffTree(self, forest):
"""
:type forest: List[List[int]]
:rtype: int
"""
def dot(p1, p2):
return p1[0]*p2[0]+p1[1]*p2[1]
def minStep(p1, p2):
min_steps = abs(p1[0]-p2[0])+abs(p1[1]-p2[1])
closer, detour = [p1], []
lookup = set()
while True:
if not closer: # cannot find a path in the closer expansions
if not detour: # no other possible path
return -1
# try other possible paths in detour expansions with extra 2-step cost
min_steps += 2
closer, detour = detour, closer
i, j = closer.pop()
if (i, j) == p2:
return min_steps
if (i, j) not in lookup:
lookup.add((i, j))
for I, J in (i+1, j), (i-1, j), (i, j+1), (i, j-1):
if 0 <= I < m and 0 <= J < n and forest[I][J] and (I, J) not in lookup:
is_closer = dot((I-i, J-j), (p2[0]-i, p2[1]-j)) > 0
(closer if is_closer else detour).append((I, J))
return min_steps
m, n = len(forest), len(forest[0])
min_heap = []
for i in xrange(m):
for j in xrange(n):
if forest[i][j] > 1:
heapq.heappush(min_heap, (forest[i][j], (i, j)))
start = (0, 0)
result = 0
while min_heap:
tree = heapq.heappop(min_heap)
step = minStep(start, tree[1])
if step < 0:
return -1
result += step
start = tree[1]
return result
# Time: O(t * (logt + m * n)), t is the number of trees
# Space: O(t + m * n)
|
Solution
|
python
|
Textualize__textual
|
tests/test_message_pump.py
|
{
"start": 323,
"end": 1071
}
|
class ____(Widget):
called_by = None
def key_x(self):
self.called_by = self.key_x
def key_ctrl_i(self):
self.called_by = self.key_ctrl_i
async def test_dispatch_key_valid_key():
widget = ValidWidget()
result = await dispatch_key(widget, Key(key="x", character="x"))
assert result is True
assert widget.called_by == widget.key_x
async def test_dispatch_key_valid_key_alias():
"""When you press tab or ctrl+i, it comes through as a tab key event, but handlers for
tab and ctrl+i are both considered valid."""
widget = ValidWidget()
result = await dispatch_key(widget, Key(key="tab", character="\t"))
assert result is True
assert widget.called_by == widget.key_ctrl_i
|
ValidWidget
|
python
|
google__jax
|
jax/_src/pallas/pipelining/schedulers.py
|
{
"start": 4634,
"end": 6693
}
|
class ____:
"""A scoreboard used to book-keep data dependencies.
Attributes:
which_stage_writes: A mapping from buffer index to the stage index that
writes to it.
which_stages_read: A mapping from buffer index to the stages that read
from it.
stage_counters: A list of length num_stages that tracks the number of times
each stage has run.
"""
which_stage_writes: Mapping[internal.BufferIndex, int]
which_stages_read: Mapping[internal.BufferIndex, Sequence[int]]
stage_counters: list[jax.Array | int]
@classmethod
def create(cls, stages: Sequence[internal.PipelineStage]):
which_stage_writes = collections.defaultdict(lambda: None)
which_stage_reads = collections.defaultdict(set)
stage_counters = [0] * len(stages)
for i, stage in enumerate(stages):
for write_idx in stage.get_write_idxs():
which_stage_writes[write_idx] = i
for read_idx in stage.get_read_idxs():
which_stage_reads[read_idx].add(i)
return cls(which_stage_writes, which_stage_reads, stage_counters)
def get_stage_counter(self, stage_idx: int) -> jax.Array | int:
"""Returns the current stage counter for the given stage index."""
return self.stage_counters[stage_idx]
def get_writing_stage(self, buffer_idx: internal.BufferIndex) -> int:
"""Returns the stage index that writes to the given buffer index."""
return self.which_stage_writes[buffer_idx]
def increment_stage_counter(self, stage_idx: int) -> None:
"""Increments the stage counter for the given stage index."""
self.stage_counters[stage_idx] += 1
def copy(self) -> "Scoreboard":
"""Returns a deep copy of the scoreboard."""
new_stage_counters = copy.copy(self.stage_counters)
return Scoreboard(self.which_stage_writes, self.which_stages_read,
new_stage_counters)
@functools.partial(jax.tree_util.register_dataclass,
data_fields=["indices"],
meta_fields=["grid", "offsets", "dynamic"])
@dataclasses.dataclass(frozen=True)
|
Scoreboard
|
python
|
faif__python-patterns
|
patterns/behavioral/publish_subscribe.py
|
{
"start": 173,
"end": 785
}
|
class ____:
def __init__(self) -> None:
self.msg_queue = []
self.subscribers = {}
def notify(self, msg: str) -> None:
self.msg_queue.append(msg)
def subscribe(self, msg: str, subscriber: Subscriber) -> None:
self.subscribers.setdefault(msg, []).append(subscriber)
def unsubscribe(self, msg: str, subscriber: Subscriber) -> None:
self.subscribers[msg].remove(subscriber)
def update(self) -> None:
for msg in self.msg_queue:
for sub in self.subscribers.get(msg, []):
sub.run(msg)
self.msg_queue = []
|
Provider
|
python
|
fluentpython__example-code-2e
|
10-dp-1class-func/untyped/strategy_best2.py
|
{
"start": 1565,
"end": 3148
}
|
class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total() - discount
def __repr__(self):
return f'<Order total: {self.total():.2f} due: {self.due():.2f}>'
def fidelity_promo(order):
"""5% discount for customers with 1000 or more fidelity points"""
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
def bulk_item_promo(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
def large_order_promo(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
# tag::STRATEGY_BEST2[]
promos = [globals()[name] for name in globals() # <1>
if name.endswith('_promo') # <2>
and name != 'best_promo'] # <3>
def best_promo(order):
"""Select best discount available
"""
return max(promo(order) for promo in promos) # <4>
# end::STRATEGY_BEST2[]
|
Order
|
python
|
walkccc__LeetCode
|
solutions/985. Sum of Even Numbers After Queries/985.py
|
{
"start": 0,
"end": 401
}
|
class ____:
def sumEvenAfterQueries(
self,
nums: list[int],
queries: list[list[int]],
) -> list[int]:
ans = []
summ = sum(a for a in nums if a % 2 == 0)
for val, index in queries:
if nums[index] % 2 == 0:
summ -= nums[index]
nums[index] += val
if nums[index] % 2 == 0:
summ += nums[index]
ans.append(summ)
return ans
|
Solution
|
python
|
doocs__leetcode
|
solution/1300-1399/1334.Find the City With the Smallest Number of Neighbors at a Threshold Distance/Solution.py
|
{
"start": 0,
"end": 987
}
|
class ____:
def findTheCity(
self, n: int, edges: List[List[int]], distanceThreshold: int
) -> int:
def dijkstra(u: int) -> int:
dist = [inf] * n
dist[u] = 0
vis = [False] * n
for _ in range(n):
k = -1
for j in range(n):
if not vis[j] and (k == -1 or dist[k] > dist[j]):
k = j
vis[k] = True
for j in range(n):
# dist[j] = min(dist[j], dist[k] + g[k][j])
if dist[k] + g[k][j] < dist[j]:
dist[j] = dist[k] + g[k][j]
return sum(d <= distanceThreshold for d in dist)
g = [[inf] * n for _ in range(n)]
for f, t, w in edges:
g[f][t] = g[t][f] = w
ans, cnt = n, inf
for i in range(n - 1, -1, -1):
if (t := dijkstra(i)) < cnt:
cnt, ans = t, i
return ans
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-hardcoded-records/source_hardcoded_records/streams.py
|
{
"start": 171,
"end": 557
}
|
class ____(Stream, ABC):
primary_key = None
sample_record = None
def __init__(self, count: int, **kwargs):
super().__init__(**kwargs)
self.count = count
def read_records(self, **kwargs) -> Iterable[Mapping[str, Any]]:
"""Generate records from the stream."""
for _ in range(self.count):
yield self.sample_record
|
HardcodedStream
|
python
|
doocs__leetcode
|
solution/1200-1299/1252.Cells with Odd Values in a Matrix/Solution2.py
|
{
"start": 0,
"end": 268
}
|
class ____:
def oddCells(self, m: int, n: int, indices: List[List[int]]) -> int:
row = [0] * m
col = [0] * n
for r, c in indices:
row[r] += 1
col[c] += 1
return sum((i + j) % 2 for i in row for j in col)
|
Solution
|
python
|
spack__spack
|
lib/spack/spack/cmd/commands.py
|
{
"start": 4216,
"end": 5182
}
|
class ____(ArgparseWriter):
"""Write argparse output as a list of subcommands."""
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
return " " * self.level + cmd.prog + "\n"
_positional_to_subroutine: Dict[str, str] = {
"package": "_all_packages",
"spec": "_all_packages",
"filter": "_all_packages",
"installed": "_installed_packages",
"compiler": "_installed_compilers",
"section": "_config_sections",
"env": "_environments",
"extendable": "_extensions",
"keys": "_keys",
"help_command": "_subcommands",
"mirror": "_mirrors",
"virtual": "_providers",
"namespace": "_repos",
"hash": "_all_resource_hashes",
"pytest": "_unit_tests",
}
|
SubcommandWriter
|
python
|
celery__celery
|
t/unit/app/test_exceptions.py
|
{
"start": 101,
"end": 510
}
|
class ____:
def test_when_datetime(self):
x = Retry('foo', KeyError(), when=datetime.now(timezone.utc))
assert x.humanize()
def test_pickleable(self):
x = Retry('foo', KeyError(), when=datetime.now(timezone.utc))
y = pickle.loads(pickle.dumps(x))
assert x.message == y.message
assert repr(x.exc) == repr(y.exc)
assert x.when == y.when
|
test_Retry
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_op_schema.py
|
{
"start": 314,
"end": 4641
}
|
class ____(TestCase):
def test_equality_checks_lists_of_dtensor_spec(self):
"""If x == y, then we must have h(x) == h(y)."""
dts = DTensorSpec(mesh=None, placements=tuple(), tensor_meta=None)
schema1 = OpSchema(op=None, args_schema=(dts, [dts]), kwargs_schema={})
schema2 = OpSchema(op=None, args_schema=(dts, [dts, dts]), kwargs_schema={})
# This is a regression test; these schemas used to compare equal.
self.assertNotEqual(schema1, schema2)
self.assertNotEqual(hash(schema1), hash(schema2))
def test_equality_respects_static_attributes(self):
def _get_sample_op_schemas(static_arg_val, static_kwarg_val):
dts = DTensorSpec(mesh=None, placements=tuple(), tensor_meta=None)
static_argnum = 2
static_kwargkey = ["statickwarg"]
annotated_schemas = [
(False, False, None),
(True, False, RuntimeSchemaInfo(static_argnum=static_argnum)),
(False, True, RuntimeSchemaInfo(static_kwargkey=static_kwargkey)),
(
True,
True,
RuntimeSchemaInfo(
static_argnum=static_argnum, static_kwargkey=static_kwargkey
),
),
]
# non-tensor args show up in hash iff the argnum is static/
# kwargs show up in hash iff their name is in static_kwargkey.
# random elements are random because they are not supposed to matter for
# equality at all.
args_schema = (dts, random.randint(1, 1000000), static_arg_val)
kwargs_schema = {
"ignoredkwarg": random.randint(1, 1000000),
"statickwarg": static_kwarg_val,
}
return [
(
has_static_arg,
has_static_kwarg,
OpSchema(
op=None,
args_schema=args_schema,
kwargs_schema=kwargs_schema,
schema_info=si,
),
)
for (has_static_arg, has_static_kwarg, si) in annotated_schemas
]
for lhs_has_static_arg, lhs_has_static_kwarg, lhs in _get_sample_op_schemas(
1, 2
):
# Static arg/kwarg both match
for rhs_has_static_arg, rhs_has_static_kwarg, rhs in _get_sample_op_schemas(
1, 2
):
if (
lhs_has_static_arg == rhs_has_static_arg
and lhs_has_static_kwarg == rhs_has_static_kwarg
):
self.assertEqual(lhs, rhs)
else:
self.assertNotEqual(lhs, rhs)
# Static arg mismatch
for rhs_has_static_arg, rhs_has_static_kwarg, rhs in _get_sample_op_schemas(
3, 2
):
if (
lhs_has_static_arg
or rhs_has_static_arg
or lhs_has_static_kwarg != rhs_has_static_kwarg
):
self.assertNotEqual(lhs, rhs)
else:
self.assertEqual(lhs, rhs)
# Static kwarg mismatch
for rhs_has_static_arg, rhs_has_static_kwarg, rhs in _get_sample_op_schemas(
1, 3
):
if (
lhs_has_static_kwarg
or rhs_has_static_kwarg
or lhs_has_static_arg != rhs_has_static_arg
):
self.assertNotEqual(lhs, rhs)
else:
self.assertEqual(lhs, rhs)
# Static arg/kwarg both mismatch
for rhs_has_static_arg, rhs_has_static_kwarg, rhs in _get_sample_op_schemas(
3, 4
):
if (
lhs_has_static_arg
or rhs_has_static_arg
or lhs_has_static_kwarg
or rhs_has_static_kwarg
):
self.assertNotEqual(lhs, rhs)
else:
self.assertEqual(lhs, rhs)
if __name__ == "__main__":
run_tests()
|
TestOpSchema
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/composer.py
|
{
"start": 649,
"end": 8439
}
|
class ____:
def __init__(self, loader=None):
# type: (Any) -> None
self.loader = loader
if self.loader is not None and getattr(self.loader, '_composer', None) is None:
self.loader._composer = self
self.anchors = {} # type: Dict[Any, Any]
@property
def parser(self):
# type: () -> Any
if hasattr(self.loader, 'typ'):
self.loader.parser
return self.loader._parser
@property
def resolver(self):
# type: () -> Any
# assert self.loader._resolver is not None
if hasattr(self.loader, 'typ'):
self.loader.resolver
return self.loader._resolver
def check_node(self):
# type: () -> Any
# Drop the STREAM-START event.
if self.parser.check_event(StreamStartEvent):
self.parser.get_event()
# If there are more documents available?
return not self.parser.check_event(StreamEndEvent)
def get_node(self):
# type: () -> Any
# Get the root node of the next document.
if not self.parser.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# type: () -> Any
# Drop the STREAM-START event.
self.parser.get_event()
# Compose a document if the stream is not empty.
document = None # type: Any
if not self.parser.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.parser.check_event(StreamEndEvent):
event = self.parser.get_event()
raise ComposerError(
'expected a single document in the stream',
document.start_mark,
'but found another document',
event.start_mark,
)
# Drop the STREAM-END event.
self.parser.get_event()
return document
def compose_document(self):
# type: (Any) -> Any
# Drop the DOCUMENT-START event.
self.parser.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.parser.get_event()
self.anchors = {}
return node
def return_alias(self, a):
# type: (Any) -> Any
return a
def compose_node(self, parent, index):
# type: (Any, Any) -> Any
if self.parser.check_event(AliasEvent):
event = self.parser.get_event()
alias = event.anchor
if alias not in self.anchors:
raise ComposerError(
None,
None,
_F('found undefined alias {alias!r}', alias=alias),
event.start_mark,
)
return self.return_alias(self.anchors[alias])
event = self.parser.peek_event()
anchor = event.anchor
if anchor is not None: # have an anchor
if anchor in self.anchors:
# raise ComposerError(
# "found duplicate anchor %r; first occurrence"
# % (anchor), self.anchors[anchor].start_mark,
# "second occurrence", event.start_mark)
ws = (
'\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
'{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
)
warnings.warn(ws, ReusedAnchorWarning)
self.resolver.descend_resolver(parent, index)
if self.parser.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.parser.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.parser.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.resolver.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
# type: (Any) -> Any
event = self.parser.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(
tag,
event.value,
event.start_mark,
event.end_mark,
style=event.style,
comment=event.comment,
anchor=anchor,
)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
# type: (Any) -> Any
start_event = self.parser.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(
tag,
[],
start_event.start_mark,
None,
flow_style=start_event.flow_style,
comment=start_event.comment,
anchor=anchor,
)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.parser.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.parser.get_event()
if node.flow_style is True and end_event.comment is not None:
if node.comment is not None:
nprint(
'Warning: unexpected end_event commment in sequence '
'node {}'.format(node.flow_style)
)
node.comment = end_event.comment
node.end_mark = end_event.end_mark
self.check_end_doc_comment(end_event, node)
return node
def compose_mapping_node(self, anchor):
# type: (Any) -> Any
start_event = self.parser.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(
tag,
[],
start_event.start_mark,
None,
flow_style=start_event.flow_style,
comment=start_event.comment,
anchor=anchor,
)
if anchor is not None:
self.anchors[anchor] = node
while not self.parser.check_event(MappingEndEvent):
# key_event = self.parser.peek_event()
item_key = self.compose_node(node, None)
# if item_key in node.value:
# raise ComposerError("while composing a mapping",
# start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
# node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.parser.get_event()
if node.flow_style is True and end_event.comment is not None:
node.comment = end_event.comment
node.end_mark = end_event.end_mark
self.check_end_doc_comment(end_event, node)
return node
def check_end_doc_comment(self, end_event, node):
# type: (Any, Any) -> None
if end_event.comment and end_event.comment[1]:
# pre comments on an end_event, no following to move to
if node.comment is None:
node.comment = [None, None]
assert not isinstance(node, ScalarEvent)
# this is a post comment on a mapping node, add as third element
# in the list
node.comment.append(end_event.comment[1])
end_event.comment[1] = None
|
Composer
|
python
|
ray-project__ray
|
doc/source/ray-core/doc_code/actors.py
|
{
"start": 87,
"end": 529
}
|
class ____:
async def f(self):
try:
await asyncio.sleep(5)
except asyncio.CancelledError:
print("Actor task canceled.")
actor = Actor.remote()
ref = actor.f.remote()
# Wait until task is scheduled.
time.sleep(1)
ray.cancel(ref)
try:
ray.get(ref)
except ray.exceptions.RayTaskError:
print("Object reference was cancelled.")
# __cancel_end__
# __enable_task_events_start__
@ray.remote
|
Actor
|
python
|
ray-project__ray
|
rllib/examples/_old_api_stack/models/shared_weights_model.py
|
{
"start": 2044,
"end": 3723
}
|
class ____(TFModelV2):
"""Example of weight sharing between two different TFModelV2s.
NOTE: This will only work for tf1 (static graph). When running with
config.framework_str=tf2, use TF2SharedWeightsModel, instead!
Here, we share the variables defined in the 'shared' variable scope
by entering it explicitly with tf1.AUTO_REUSE. This creates the
variables for the 'fc1' layer in a global scope called 'shared'
(outside of the Policy's normal variable scope).
"""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
inputs = tf.keras.layers.Input(observation_space.shape)
with tf1.variable_scope(
tf1.VariableScope(tf1.AUTO_REUSE, "shared"),
reuse=tf1.AUTO_REUSE,
auxiliary_name_scope=False,
):
last_layer = tf.keras.layers.Dense(
units=64, activation=tf.nn.relu, name="fc1"
)(inputs)
output = tf.keras.layers.Dense(
units=num_outputs, activation=None, name="fc_out"
)(last_layer)
vf = tf.keras.layers.Dense(units=1, activation=None, name="value_out")(
last_layer
)
self.base_model = tf.keras.models.Model(inputs, [output, vf])
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(input_dict["obs"])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
|
SharedWeightsModel1
|
python
|
ray-project__ray
|
python/ray/tune/logger/json.py
|
{
"start": 2042,
"end": 4179
}
|
class ____(LoggerCallback):
"""Logs trial results in json format.
Also writes to a results file and param.json file when results or
configurations are updated. Experiments must be executed with the
JsonLoggerCallback to be compatible with the ExperimentAnalysis tool.
"""
_SAVED_FILE_TEMPLATES = [EXPR_RESULT_FILE, EXPR_PARAM_FILE, EXPR_PARAM_PICKLE_FILE]
def __init__(self):
self._trial_configs: Dict["Trial", Dict] = {}
self._trial_files: Dict["Trial", TextIO] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_files:
self._trial_files[trial].close()
# Update config
self.update_config(trial, trial.config)
# Make sure logdir exists
trial.init_local_path()
local_file = Path(trial.local_path, EXPR_RESULT_FILE)
# Resume the file from remote storage.
self._restore_from_remote(EXPR_RESULT_FILE, trial)
self._trial_files[trial] = local_file.open("at")
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_files:
self.log_trial_start(trial)
json.dump(result, self._trial_files[trial], cls=SafeFallbackEncoder)
self._trial_files[trial].write("\n")
self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial not in self._trial_files:
return
self._trial_files[trial].close()
del self._trial_files[trial]
def update_config(self, trial: "Trial", config: Dict):
self._trial_configs[trial] = config
config_out = Path(trial.local_path, EXPR_PARAM_FILE)
with config_out.open("w") as f:
json.dump(
self._trial_configs[trial],
f,
indent=2,
sort_keys=True,
cls=SafeFallbackEncoder,
)
config_pkl = Path(trial.local_path, EXPR_PARAM_PICKLE_FILE)
with config_pkl.open("wb") as f:
cloudpickle.dump(self._trial_configs[trial], f)
|
JsonLoggerCallback
|
python
|
spack__spack
|
lib/spack/spack/cmd/create.py
|
{
"start": 19168,
"end": 20037
}
|
class ____(PackageTemplate):
"""Provides appropriate overrides for octave packages"""
base_class_name = "OctavePackage"
package_class_import = "from spack_repo.builtin.build_systems.octave import OctavePackage"
dependencies = """\
extends("octave")
# FIXME: Add additional dependencies if required.
# depends_on("octave-foo", type=("build", "run"))"""
def __init__(self, name, url, versions, languages: List[str]):
# If the user provided `--name octave-splines`, don't rename it
# octave-octave-splines
if not name.startswith("octave-"):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to octave-{0}".format(name))
name = "octave-{0}".format(name)
super().__init__(name, url, versions, languages)
|
OctavePackageTemplate
|
python
|
qdrant__qdrant-client
|
tools/async_client_generator/base_generator.py
|
{
"start": 86,
"end": 425
}
|
class ____:
def __init__(self) -> None:
self.transformers: list[ast.NodeTransformer] = []
def generate(self, code: str) -> str:
nodes = ast.parse(code)
for transformer in self.transformers:
nodes = transformer.visit(nodes)
return AUTOGEN_WARNING_MESSAGE + ast.unparse(nodes)
|
BaseGenerator
|
python
|
conda__conda
|
conda/common/configuration.py
|
{
"start": 3699,
"end": 4239
}
|
class ____(ValidationError):
def __init__(
self, parameter_name, parameter_value, source, wrong_type, valid_types, msg=None
):
self.wrong_type = wrong_type
self.valid_types = valid_types
if msg is None:
msg = (
f"Parameter {parameter_name} = {parameter_value!r} declared in {source} has type {wrong_type}.\n"
f"Valid types:\n{pretty_list(valid_types)}"
)
super().__init__(parameter_name, parameter_value, source, msg=msg)
|
InvalidTypeError
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/io/csv.py
|
{
"start": 14485,
"end": 15168
}
|
class ____(StringIORewind):
params = ([True, False], ["c", "python"])
param_names = ["do_cache", "engine"]
def setup(self, do_cache, engine):
data = ("\n".join([f"10/{year}" for year in range(2000, 2100)]) + "\n") * 10
self.StringIO_input = StringIO(data)
def time_read_csv_cached(self, do_cache, engine):
try:
read_csv(
self.data(self.StringIO_input),
engine=engine,
header=None,
parse_dates=[0],
cache_dates=do_cache,
)
except TypeError:
# cache_dates is a new keyword in 0.25
pass
|
ReadCSVCachedParseDates
|
python
|
realpython__materials
|
python-sqlite-sqlalchemy/project/examples/example_3/app/models.py
|
{
"start": 2276,
"end": 2654
}
|
class ____(db.Model):
__tablename__ = "albums"
album_id = db.Column("AlbumId", db.Integer, primary_key=True)
title = db.Column("Title", db.String(160), nullable=False)
artist_id = db.Column(
"ArtistId",
db.ForeignKey("artists.ArtistId"),
nullable=False,
index=True,
)
tracks = db.relationship("Track", backref="album")
|
Album
|
python
|
django__django
|
django/forms/widgets.py
|
{
"start": 2830,
"end": 7982
}
|
class ____:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, "css", {})
js = getattr(media, "js", [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css_lists = [css]
self._js_lists = [js]
def __repr__(self):
return "Media(css=%r, js=%r)" % (self._css, self._js)
def __str__(self):
return self.render()
@property
def _css(self):
css = defaultdict(list)
for css_list in self._css_lists:
for medium, sublist in css_list.items():
css[medium].append(sublist)
return {medium: self.merge(*lists) for medium, lists in css.items()}
@property
def _js(self):
return self.merge(*self._js_lists)
def render(self):
return mark_safe(
"\n".join(
chain.from_iterable(
getattr(self, "render_" + name)() for name in MEDIA_TYPES
)
)
)
def render_js(self):
return [
(
path.__html__()
if hasattr(path, "__html__")
else format_html('<script src="{}"></script>', self.absolute_path(path))
)
for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over
# items(). We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable(
[
(
path.__html__()
if hasattr(path, "__html__")
else format_html(
'<link href="{}" media="{}" rel="stylesheet">',
self.absolute_path(path),
medium,
)
)
for path in self._css[medium]
]
for medium in media
)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(("http://", "https://", "/")):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, "_" + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(*lists):
"""
Merge lists while trying to keep the relative order of the elements.
Warn if the lists have the same elements in a different relative order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
ts = TopologicalSorter()
for head, *tail in filter(None, lists):
ts.add(head) # Ensure that the first items are included.
for item in tail:
if head != item: # Avoid circular dependency to self.
ts.add(item, head)
head = item
try:
return list(ts.static_order())
except CycleError:
warnings.warn(
"Detected duplicate Media files in an opposite order: {}".format(
", ".join(repr(list_) for list_ in lists)
),
MediaOrderConflictWarning,
)
return list(dict.fromkeys(chain.from_iterable(filter(None, lists))))
def __add__(self, other):
combined = Media()
combined._css_lists = self._css_lists[:]
combined._js_lists = self._js_lists[:]
for item in other._css_lists:
if item and item not in self._css_lists:
combined._css_lists.append(item)
for item in other._js_lists:
if item and item not in self._js_lists:
combined._js_lists.append(item)
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, "Media", None)
if definition:
extend = getattr(definition, "extend", True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m += base[medium]
return m + Media(definition)
return Media(definition)
return base
return property(_media)
|
Media
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/delete/tutorial001_py310.py
|
{
"start": 434,
"end": 2559
}
|
class ____(SQLModel):
name: str | None = None
secret_name: str | None = None
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate):
with Session(engine) as session:
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
|
HeroUpdate
|
python
|
gevent__gevent
|
src/greentest/3.10/test_ftplib.py
|
{
"start": 3571,
"end": 9048
}
|
class ____(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
self.encoding = encoding
# We use this as the string IPv4 address to direct the client
# to in response to a PASV command. To test security behavior.
# https://bugs.python.org/issue43285/.
self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode(self.encoding)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
default_error_handler()
def push(self, data):
asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
ip = self.fake_pasv_server_ip
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
|
DummyFTPHandler
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_repr_html/base.py
|
{
"start": 217,
"end": 4773
}
|
class ____:
"""Mixin class allowing to generate a link to the API documentation.
This mixin relies on three attributes:
- `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this
mixin, the default value is `sklearn`.
- `_doc_link_template`: it corresponds to the template used to generate the
link to the API documentation. Using this mixin, the default value is
`"https://scikit-learn.org/{version_url}/modules/generated/
{estimator_module}.{estimator_name}.html"`.
- `_doc_link_url_param_generator`: it corresponds to a function that generates the
parameters to be used in the template when the estimator module and name are not
sufficient.
The method :meth:`_get_doc_link` generates the link to the API documentation for a
given estimator.
This mixin provides all the necessary states for
:func:`sklearn.utils.estimator_html_repr` to generate a link to the API
documentation for the estimator HTML diagram.
Examples
--------
If the default values for `_doc_link_module`, `_doc_link_template` are not suitable,
then you can override them and provide a method to generate the URL parameters:
>>> from sklearn.base import BaseEstimator
>>> doc_link_template = "https://address.local/{single_param}.html"
>>> def url_param_generator(estimator):
... return {"single_param": estimator.__class__.__name__}
>>> class MyEstimator(BaseEstimator):
... # use "builtins" since it is the associated module when declaring
... # the class in a docstring
... _doc_link_module = "builtins"
... _doc_link_template = doc_link_template
... _doc_link_url_param_generator = url_param_generator
>>> estimator = MyEstimator()
>>> estimator._get_doc_link()
'https://address.local/MyEstimator.html'
If instead of overriding the attributes inside the class definition, you want to
override a class instance, you can use `types.MethodType` to bind the method to the
instance:
>>> import types
>>> estimator = BaseEstimator()
>>> estimator._doc_link_template = doc_link_template
>>> estimator._doc_link_url_param_generator = types.MethodType(
... url_param_generator, estimator)
>>> estimator._get_doc_link()
'https://address.local/BaseEstimator.html'
"""
_doc_link_module = "sklearn"
_doc_link_url_param_generator = None
@property
def _doc_link_template(self):
sklearn_version = parse_version(__version__)
if sklearn_version.dev is None:
version_url = f"{sklearn_version.major}.{sklearn_version.minor}"
else:
version_url = "dev"
return getattr(
self,
"__doc_link_template",
(
f"https://scikit-learn.org/{version_url}/modules/generated/"
"{estimator_module}.{estimator_name}.html"
),
)
@_doc_link_template.setter
def _doc_link_template(self, value):
setattr(self, "__doc_link_template", value)
def _get_doc_link(self):
"""Generates a link to the API documentation for a given estimator.
This method generates the link to the estimator's documentation page
by using the template defined by the attribute `_doc_link_template`.
Returns
-------
url : str
The URL to the API documentation for this estimator. If the estimator does
not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
returned.
"""
if self.__class__.__module__.split(".")[0] != self._doc_link_module:
return ""
if self._doc_link_url_param_generator is None:
estimator_name = self.__class__.__name__
# Construct the estimator's module name, up to the first private submodule.
# This works because in scikit-learn all public estimators are exposed at
# that level, even if they actually live in a private sub-module.
estimator_module = ".".join(
itertools.takewhile(
lambda part: not part.startswith("_"),
self.__class__.__module__.split("."),
)
)
return self._doc_link_template.format(
estimator_module=estimator_module, estimator_name=estimator_name
)
return self._doc_link_template.format(**self._doc_link_url_param_generator())
|
_HTMLDocumentationLinkMixin
|
python
|
psf__requests
|
src/requests/exceptions.py
|
{
"start": 2837,
"end": 2948
}
|
class ____(RequestException, ValueError):
"""The URL scheme (e.g. http or https) is missing."""
|
MissingSchema
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/logger.py
|
{
"start": 273,
"end": 1450
}
|
class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
description = graphene.String()
configField = graphene.Field(GrapheneConfigTypeField)
class Meta:
name = "Logger"
def __init__(
self,
get_config_type: Callable[[str], ConfigTypeSnap],
logger_def_snap: LoggerDefSnap,
):
super().__init__()
self._get_config_type = get_config_type
self._logger_def_snap = check.inst_param(logger_def_snap, "logger_def_snap", LoggerDefSnap)
self.name = logger_def_snap.name
self.description = logger_def_snap.description
def resolve_configField(self, _: ResolveInfo):
if self._logger_def_snap.config_field_snap:
try:
# config type may not be present if mode config mapped, null out gracefully
self._get_config_type(self._logger_def_snap.config_field_snap.type_key)
except KeyError:
return None
return GrapheneConfigTypeField(
self._get_config_type,
field_snap=self._logger_def_snap.config_field_snap,
)
return None
|
GrapheneLogger
|
python
|
getsentry__sentry
|
tests/sentry/web/frontend/test_auth_logout.py
|
{
"start": 220,
"end": 1870
}
|
class ____(TestCase):
@cached_property
def path(self) -> str:
return reverse("sentry-logout")
def test_get_shows_page(self) -> None:
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert self.client.session.keys(), "Not logged out yet"
def test_logs_user_out(self) -> None:
self.login_as(self.user)
resp = self.client.post(self.path)
assert resp.status_code == 302
assert list(self.client.session.keys()) == []
def test_same_behavior_with_anonymous_user(self) -> None:
resp = self.client.post(self.path)
assert resp.status_code == 302
assert list(self.client.session.keys()) == []
def test_redirects_to_relative_next_url(self) -> None:
self.login_as(self.user)
next = "/welcome"
resp = self.client.post(self.path + "?next=" + next)
assert resp.status_code == 302
assert resp.get("Location", "").endswith(next)
def test_doesnt_redirect_to_external_next_url(self) -> None:
next = "http://example.com"
resp = self.client.post(self.path + "?next=" + quote(next))
self.assertRedirects(resp, "/auth/login/")
resp = self.client.post(self.path + "?next=" + quote("http:1234556"))
self.assertRedirects(resp, "/auth/login/")
resp = self.client.post(self.path + "?next=" + quote("///example.com"))
self.assertRedirects(resp, "/auth/login/")
resp = self.client.post(self.path + "?next=" + quote("http:///example.com"))
self.assertRedirects(resp, "/auth/login/")
|
AuthLogoutTest
|
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 1850,
"end": 2173
}
|
class ____(BaseSimpleType):
@classmethod
def convert_from_xml(cls, str_value: str) -> int:
return int(str_value)
@classmethod
def convert_to_xml(cls, value: int) -> str:
return str(value)
@classmethod
def validate(cls, value: Any) -> None:
cls.validate_int(value)
|
BaseIntType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-changes-to-make-k-semi-palindromes.py
|
{
"start": 106,
"end": 1595
}
|
class ____(object):
def minimumChanges(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
divisors = [[] for _ in xrange(len(s)+1)]
for i in xrange(1, len(divisors)): # Time: O(nlogn), Space: O(nlogn)
for j in xrange(i, len(divisors), i):
divisors[j].append(i)
dp = [[{} for _ in xrange(len(s))] for _ in xrange(len(s))]
for l in xrange(1, len(s)+1): # Time: O(n * nlogn + n^3), Space: O(n * nlogn)
for left in xrange(len(s)-l+1):
right = left+l-1
for d in divisors[l]:
dp[left][right][d] = (dp[left+d][right-d][d] if left+d < right-d else 0)+sum(s[left+i] != s[(right-(d-1))+i] for i in xrange(d))
dp2 = [[min(dp[i][j][d] for d in divisors[j-i+1] if d != j-i+1) if i < j else 0 for j in xrange(len(s))] for i in xrange(len(s))] # Time: O(n^2), Space: O(n^2)
dp3 = [len(s)]*(len(s)+1)
dp3[0] = 0
for l in xrange(k): # Time: O(k * n^2), Space: O(n)
new_dp3 = [len(s)]*(len(s)+1)
for i in xrange(len(s)):
for j in xrange(l*2, i): # optimized for the fact that the length of semi-palindrome is at least 2
new_dp3[i+1]= min(new_dp3[i+1], dp3[j]+dp2[j][i])
dp3 = new_dp3
return dp3[len(s)]
# Time: O(n * nlogn + n^3 + n^2 * k) = O(n^3)
# Space: O(n * nlogn) = O(n^2 * logn)
# number theory, dp
|
Solution
|
python
|
pytorch__pytorch
|
torch/_dynamo/source.py
|
{
"start": 33523,
"end": 34384
}
|
class ____(Source):
"""Points to the actual `torch` module - used instead of GlobalSource
in case the user has overridden `torch` in their local namespace"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
from .guards import GuardBuilder, install_guard
install_guard(self.make_guard(GuardBuilder.ID_MATCH))
def name(self) -> str:
return "__import__('torch')"
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.extend_output(
[
codegen.create_load_const(0), # level
create_build_tuple(0), # fromlist
codegen.create_import_name("torch"),
]
)
def guard_source(self) -> GuardSource:
return GuardSource.GLOBAL
@dataclasses.dataclass(frozen=True)
|
TorchSource
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/globus/provider.py
|
{
"start": 514,
"end": 1251
}
|
class ____(OAuth2Provider):
id = "globus"
name = "Globus"
account_class = GlobusAccount
oauth2_adapter_class = GlobusOAuth2Adapter
def extract_uid(self, data):
if "sub" not in data:
raise ProviderException("Globus OAuth error", data)
return str(data["sub"])
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
username=data.get("preferred_username"),
name=data.get("name"),
)
def get_default_scope(self):
scope = ["openid", "profile", "offline_access"]
if app_settings.QUERY_EMAIL:
scope.append("email")
return scope
provider_classes = [GlobusProvider]
|
GlobusProvider
|
python
|
huggingface__transformers
|
src/transformers/models/umt5/modeling_umt5.py
|
{
"start": 5427,
"end": 6106
}
|
class ____(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = UMT5DenseGatedActDense(config)
else:
self.DenseReluDense = UMT5DenseActDense(config)
self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
|
UMT5LayerFF
|
python
|
ray-project__ray
|
python/ray/data/tests/test_iceberg.py
|
{
"start": 27747,
"end": 34488
}
|
class ____:
"""Test basic write operations for APPEND, UPSERT, and OVERWRITE modes."""
def test_append_basic(self, clean_table):
"""Test basic APPEND mode - add new rows without schema changes."""
initial_data = _create_typed_dataframe(
{"col_a": [1, 2], "col_b": ["row_1", "row_2"], "col_c": [1, 2]}
)
_write_to_iceberg(initial_data)
append_data = _create_typed_dataframe(
{"col_a": [3, 4], "col_b": ["row_3", "row_4"], "col_c": [3, 4]}
)
_write_to_iceberg(append_data, mode=SaveMode.APPEND)
result_df = _read_from_iceberg(sort_by="col_a")
expected = _create_typed_dataframe(
{
"col_a": [1, 2, 3, 4],
"col_b": ["row_1", "row_2", "row_3", "row_4"],
"col_c": [1, 2, 3, 4],
}
)
assert rows_same(result_df, expected)
def test_upsert_basic(self, clean_table):
"""Test basic upsert - update existing rows and insert new ones."""
initial_data = _create_typed_dataframe(
{
"col_a": [1, 2, 3],
"col_b": ["initial_1", "initial_2", "initial_3"],
"col_c": [1, 2, 3],
}
)
_write_to_iceberg(initial_data)
upsert_data = _create_typed_dataframe(
{
"col_a": [2, 3, 4],
"col_b": ["updated_2", "updated_3", "new_4"],
"col_c": [2, 3, 4],
}
)
_write_to_iceberg(
upsert_data, mode=SaveMode.UPSERT, upsert_kwargs={"join_cols": ["col_a"]}
)
result_df = _read_from_iceberg(sort_by="col_a")
expected = _create_typed_dataframe(
{
"col_a": [1, 2, 3, 4],
"col_b": ["initial_1", "updated_2", "updated_3", "new_4"],
"col_c": [1, 2, 3, 4],
}
)
assert rows_same(result_df, expected)
def test_upsert_composite_key(self, clean_table):
"""Test upsert with composite key (multiple identifier fields)."""
initial_data = _create_typed_dataframe(
{
"col_a": [1, 1, 2, 2],
"col_b": ["A", "B", "A", "B"],
"col_c": [10, 20, 30, 40],
}
)
_write_to_iceberg(initial_data)
# Update (1, "B") and (2, "A"), insert (3, "A")
upsert_data = _create_typed_dataframe(
{"col_a": [1, 2, 3], "col_b": ["B", "A", "A"], "col_c": [999, 888, 777]}
)
_write_to_iceberg(
upsert_data,
mode=SaveMode.UPSERT,
upsert_kwargs={"join_cols": ["col_a", "col_b"]},
)
result_df = _read_from_iceberg(sort_by=["col_a", "col_b"])
expected = _create_typed_dataframe(
{
"col_a": [1, 1, 2, 2, 3],
"col_b": ["A", "B", "A", "B", "A"],
"col_c": [10, 999, 888, 40, 777],
}
)
assert rows_same(result_df, expected)
def test_overwrite_full_table(self, clean_table):
"""Test full table overwrite - replace all data."""
initial_data = _create_typed_dataframe(
{
"col_a": [1, 2, 3, 4, 5],
"col_b": ["old_1", "old_2", "old_3", "old_4", "old_5"],
"col_c": [1, 2, 3, 4, 5],
}
)
_write_to_iceberg(initial_data)
new_data = _create_typed_dataframe(
{
"col_a": [10, 20, 30],
"col_b": ["new_10", "new_20", "new_30"],
"col_c": [100, 200, 300],
}
)
_write_to_iceberg(new_data, mode=SaveMode.OVERWRITE)
result_df = _read_from_iceberg(sort_by="col_a")
expected = _create_typed_dataframe(
{
"col_a": [10, 20, 30],
"col_b": ["new_10", "new_20", "new_30"],
"col_c": [100, 200, 300],
}
)
assert rows_same(result_df, expected)
def test_overwrite_with_filter(self, clean_table):
"""Test partial overwrite using filter expression."""
initial_data = _create_typed_dataframe(
{
"col_a": [1, 2, 3, 4, 5],
"col_b": ["data_1", "data_2", "data_3", "data_4", "data_5"],
"col_c": [1, 1, 2, 2, 3],
}
)
_write_to_iceberg(initial_data)
# Replace only rows where col_c == 2
overwrite_data = _create_typed_dataframe(
{
"col_a": [10, 20],
"col_b": ["replaced_10", "replaced_20"],
"col_c": [2, 2],
}
)
_write_to_iceberg(
overwrite_data, mode=SaveMode.OVERWRITE, overwrite_filter=col("col_c") == 2
)
result_df = _read_from_iceberg(sort_by="col_a")
expected = _create_typed_dataframe(
{
"col_a": [1, 2, 5, 10, 20],
"col_b": ["data_1", "data_2", "data_5", "replaced_10", "replaced_20"],
"col_c": [1, 1, 3, 2, 2],
}
)
assert rows_same(result_df, expected)
def test_overwrite_full_table_missing_columns(self, clean_table):
"""Test full table overwrite when new data is missing columns - they become NULL."""
# Initial data with 3 columns
initial_data = _create_typed_dataframe(
{
"col_a": [1, 2, 3],
"col_b": ["alice", "bob", "charlie"],
"col_c": [10, 20, 30],
}
)
_write_to_iceberg(initial_data)
# Overwrite with data that's missing col_b (non-partition column)
# Note: col_c must be present since table is partitioned by col_c
new_data = _create_typed_dataframe(
{
"col_a": [10, 20],
# col_b is intentionally missing (non-partition column)
"col_c": [100, 200],
}
)
_write_to_iceberg(new_data, mode=SaveMode.OVERWRITE)
# Read back and verify
result_df = _read_from_iceberg(sort_by="col_a")
# All old data should be gone
assert len(result_df) == 2
assert result_df["col_a"].tolist() == [10, 20]
assert result_df["col_c"].tolist() == [100, 200]
# col_b should still exist in schema but be NULL for new rows
assert "col_b" in result_df.columns
assert pd.isna(result_df["col_b"].iloc[0])
assert pd.isna(result_df["col_b"].iloc[1])
@pytest.mark.skipif(
get_pyarrow_version() < parse_version("14.0.0"),
reason="PyIceberg 0.7.0 fails on pyarrow <= 14.0.0",
)
|
TestBasicWriteModes
|
python
|
dask__dask
|
dask/layers.py
|
{
"start": 1815,
"end": 2322
}
|
class ____(ArrayBlockwiseDep):
"""Produce slice(s) into the full-sized array given a chunk index"""
starts: tuple[tuple[int, ...], ...]
def __init__(self, chunks: tuple[tuple[int, ...], ...]):
super().__init__(chunks)
self.starts = tuple(cached_cumsum(c, initial_zero=True) for c in chunks)
def __getitem__(self, idx: tuple):
loc = tuple((start[i], start[i + 1]) for i, start in zip(idx, self.starts))
return tuple(slice(*s, None) for s in loc)
|
ArraySliceDep
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-hologres/llama_index/vector_stores/hologres/base.py
|
{
"start": 522,
"end": 6420
}
|
class ____(BasePydanticVectorStore):
"""
Hologres Vector Store.
Hologres is a one-stop real-time data warehouse, which can support high performance OLAP analysis and high QPS online services.
Hologres supports vector processing and allows you to use vector data
to show the characteristics of unstructured data.
https://www.alibabacloud.com/help/en/hologres/user-guide/introduction-to-vector-processing
"""
# Hologres storage instance
_storage: HologresVector = PrivateAttr()
# Hologres vector db stores the document node's text as string.
stores_text: bool = True
def __init__(self, hologres_storage: HologresVector):
"""
Construct from a Hologres storage instance.
You can use from_connection_string instead.
"""
super().__init__()
self._storage = hologres_storage
@classmethod
def from_connection_string(
cls,
connection_string: str,
table_name: str,
table_schema: Dict[str, str] = {"document": "text"},
embedding_dimension: int = 1536,
pre_delete_table: bool = False,
) -> "HologresVectorStore":
"""
Create Hologres Vector Store from connection string.
Args:
connection_string: connection string of hologres database
table_name: table name to persist data
table_schema: table column schemam
embedding_dimension: dimension size of embedding vector
pre_delete_table: whether to erase data from table on creation
"""
hologres_storage = HologresVector(
connection_string,
ndims=embedding_dimension,
table_name=table_name,
table_schema=table_schema,
pre_delete_table=pre_delete_table,
)
return cls(hologres_storage=hologres_storage)
@classmethod
def from_param(
cls,
host: str,
port: int,
user: str,
password: str,
database: str,
table_name: str,
table_schema: Dict[str, str] = {"document": "text"},
embedding_dimension: int = 1536,
pre_delete_table: bool = False,
) -> "HologresVectorStore":
"""
Create Hologres Vector Store from database configurations.
Args:
host: host
port: port number
user: hologres user
password: hologres password
database: hologres database
table_name: hologres table name
table_schema: table column schemam
embedding_dimension: dimension size of embedding vector
pre_delete_table: whether to erase data from table on creation
"""
connection_string = HologresVector.connection_string_from_db_params(
host, port, database, user, password
)
return cls.from_connection_string(
connection_string=connection_string,
table_name=table_name,
embedding_dimension=embedding_dimension,
table_schema=table_schema,
pre_delete_table=pre_delete_table,
)
@classmethod
def class_name(cls) -> str:
return "HologresVectorStore"
@property
def client(self) -> Any:
return self._storage
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to hologres index.
Embedding data will be saved to `vector` column and text will be saved to `document` column.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
embeddings = []
node_ids = []
schema_data_list = []
meta_data_list = []
for node in nodes:
text_embedding = node.get_embedding()
embeddings.append(text_embedding)
node_ids.append(node.node_id)
meta_data_list.append(node.metadata)
schema_data_list.append(
{"document": node.get_content(metadata_mode=MetadataMode.NONE)}
)
self._storage.upsert_vectors(
embeddings, node_ids, meta_data_list, schema_data_list
)
return node_ids
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
similarity_top_k (int): top k most similar nodes
"""
query_embedding = cast(List[float], query.query_embedding)
top_k = query.similarity_top_k
query_results: List[dict[str, Any]] = self._storage.search(
query_embedding,
k=top_k,
select_columns=["document", "vector"],
metadata_filters=query.filters,
)
# if empty, then return an empty response
if len(query_results) == 0:
return VectorStoreQueryResult(similarities=[], ids=[])
nodes = []
similarities = []
ids = []
for result in query_results:
node = TextNode(
text=result["document"],
id_=result["id"],
embedding=result["vector"],
metadata=result["metadata"],
)
nodes.append(node)
ids.append(result["id"])
similarities.append(math.exp(-result["distance"]))
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
self._storage.delete_vectors(metadata_filters={"doc_id": ref_doc_id})
|
HologresVectorStore
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/patches.py
|
{
"start": 106388,
"end": 135460
}
|
class ____(_Style):
"""
`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with `FancyArrowPatch`.
An arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(ArrowStyle:table)s
For an overview of the visual appearance, see
:doc:`/gallery/text_labels_and_annotations/fancyarrow_demo`.
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a `.Path` instance and a boolean
value. *path* is a `.Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in `BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
Notes
-----
*angleA* and *angleB* specify the orientation of the bracket, as either a
clockwise or counterclockwise angle depending on the arrow type. 0 degrees
means perpendicular to the line connecting the arrow's head and tail.
.. plot:: gallery/text_labels_and_annotations/angles_on_bracket_arrows.py
"""
_style_list = {}
class _Base:
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating the path is open therefore is not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
@staticmethod
def ensure_quadratic_bezier(path):
"""
Some ArrowStyle classes only works with a simple quadratic
Bézier curve (created with `.ConnectionStyle.Arc3` or
`.ConnectionStyle.Angle3`). This static method checks if the
provided path is a simple quadratic Bézier curve and returns its
control points if true.
"""
segments = list(path.iter_segments())
if (len(segments) != 2 or segments[0][1] != Path.MOVETO or
segments[1][1] != Path.CURVE3):
raise ValueError(
"'path' is not a valid quadratic Bezier curve")
return [*segments[0][0], *segments[1][0]]
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle class and
must be overridden in the subclasses. It receives the *path*
object along which the arrow will be drawn, and the
*mutation_size*, with which the arrow head etc. will be scaled.
The *linewidth* may be used to adjust the path so that it does not
pass beyond the given points. It returns a tuple of a `.Path`
instance and a boolean. The boolean value indicate whether the
path can be filled or not. The return value can also be a list of
paths and list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and takes care of the aspect ratio.
"""
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices = path.vertices / [1, aspect_ratio]
path_shrunk = Path(vertices, path.codes)
# call transmute method with squeezed height.
path_mutated, fillable = self.transmute(path_shrunk,
mutation_size,
linewidth)
if np.iterable(fillable):
# Restore the height
path_list = [Path(p.vertices * [1, aspect_ratio], p.codes)
for p in path_mutated]
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is the concatenation of the original path, and at
most two paths representing the arrow head or bracket at the start
point and at the end point. The arrow heads can be either open
or closed.
"""
arrow = "-"
fillbegin = fillend = False # Whether arrows are filled.
def __init__(self, head_length=.4, head_width=.2, widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2, angleA=0, angleB=0, scaleA=None,
scaleB=None):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head, relative to *mutation_size*.
head_width : float, default: 0.2
Width of the arrow head, relative to *mutation_size*.
widthA, widthB : float, default: 1.0
Width of the bracket.
lengthA, lengthB : float, default: 0.2
Length of the bracket.
angleA, angleB : float, default: 0
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
scaleA, scaleB : float, default: *mutation_size*
The scale of the brackets.
"""
self.head_length, self.head_width = head_length, head_width
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
self._beginarrow_head = False
self._beginarrow_bracket = False
self._endarrow_head = False
self._endarrow_bracket = False
if "-" not in self.arrow:
raise ValueError("arrow must have the '-' between "
"the two heads")
beginarrow, endarrow = self.arrow.split("-", 1)
if beginarrow == "<":
self._beginarrow_head = True
self._beginarrow_bracket = False
elif beginarrow == "<|":
self._beginarrow_head = True
self._beginarrow_bracket = False
self.fillbegin = True
elif beginarrow in ("]", "|"):
self._beginarrow_head = False
self._beginarrow_bracket = True
if endarrow == ">":
self._endarrow_head = True
self._endarrow_bracket = False
elif endarrow == "|>":
self._endarrow_head = True
self._endarrow_bracket = False
self.fillend = True
elif endarrow in ("[", "|"):
self._endarrow_head = False
self._endarrow_bracket = True
super().__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projected, The arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunken so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
# pad_projected : amount of pad to account the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def _get_bracket(self, x0, y0,
x1, y1, width, length, angle):
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
if angle:
trans = transforms.Affine2D().rotate_deg_around(x0, y0, angle)
vertices_arrow = trans.transform(vertices_arrow)
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
# docstring inherited
if self._beginarrow_head or self._endarrow_head:
head_length = self.head_length * mutation_size
head_width = self.head_width * mutation_size
head_dist = np.hypot(head_length, head_width)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
scaleA = mutation_size if self.scaleA is None else self.scaleA
scaleB = mutation_size if self.scaleB is None else self.scaleB
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = self._beginarrow_head and (x0, y0) != (x1, y1)
verticesA, codesA, ddxA, ddyA = (
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t, linewidth)
if has_begin_arrow
else ([], [], 0, 0)
)
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = self._endarrow_head and (x2, y2) != (x3, y3)
verticesB, codesB, ddxB, ddyB = (
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t, linewidth)
if has_end_arrow
else ([], [], 0, 0)
)
# This simple code will not work if ddx, ddy is greater than the
# separation between vertices.
paths = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
fills = [False]
if has_begin_arrow:
if self.fillbegin:
paths.append(
Path([*verticesA, (0, 0)], [*codesA, Path.CLOSEPOLY]))
fills.append(True)
else:
paths.append(Path(verticesA, codesA))
fills.append(False)
elif self._beginarrow_bracket:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
verticesA, codesA = self._get_bracket(x0, y0, x1, y1,
self.widthA * scaleA,
self.lengthA * scaleA,
self.angleA)
paths.append(Path(verticesA, codesA))
fills.append(False)
if has_end_arrow:
if self.fillend:
fills.append(True)
paths.append(
Path([*verticesB, (0, 0)], [*codesB, Path.CLOSEPOLY]))
else:
fills.append(False)
paths.append(Path(verticesB, codesB))
elif self._endarrow_bracket:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
verticesB, codesB = self._get_bracket(x0, y0, x1, y1,
self.widthB * scaleB,
self.lengthB * scaleB,
self.angleB)
paths.append(Path(verticesB, codesB))
fills.append(False)
return paths, fills
@_register_style(_style_list, name="-")
class Curve(_Curve):
"""A simple curve without any arrow head."""
def __init__(self): # hide head_length, head_width
# These attributes (whose values come from backcompat) only matter
# if someone modifies beginarrow/etc. on an ArrowStyle instance.
super().__init__(head_length=.2, head_width=.1)
@_register_style(_style_list, name="<-")
class CurveA(_Curve):
"""An arrow with a head at its start point."""
arrow = "<-"
@_register_style(_style_list, name="->")
class CurveB(_Curve):
"""An arrow with a head at its end point."""
arrow = "->"
@_register_style(_style_list, name="<->")
class CurveAB(_Curve):
"""An arrow with heads both at the start and the end point."""
arrow = "<->"
@_register_style(_style_list, name="<|-")
class CurveFilledA(_Curve):
"""An arrow with filled triangle head at the start."""
arrow = "<|-"
@_register_style(_style_list, name="-|>")
class CurveFilledB(_Curve):
"""An arrow with filled triangle head at the end."""
arrow = "-|>"
@_register_style(_style_list, name="<|-|>")
class CurveFilledAB(_Curve):
"""An arrow with filled triangle heads at both ends."""
arrow = "<|-|>"
@_register_style(_style_list, name="]-")
class BracketA(_Curve):
"""An arrow with an outward square bracket at its start."""
arrow = "]-"
def __init__(self, widthA=1., lengthA=0.2, angleA=0):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
lengthA : float, default: 0.2
Length of the bracket.
angleA : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)
@_register_style(_style_list, name="-[")
class BracketB(_Curve):
"""An arrow with an outward square bracket at its end."""
arrow = "-["
def __init__(self, widthB=1., lengthB=0.2, angleB=0):
"""
Parameters
----------
widthB : float, default: 1.0
Width of the bracket.
lengthB : float, default: 0.2
Length of the bracket.
angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="]-[")
class BracketAB(_Curve):
"""An arrow with outward square brackets at both ends."""
arrow = "]-["
def __init__(self,
widthA=1., lengthA=0.2, angleA=0,
widthB=1., lengthB=0.2, angleB=0):
"""
Parameters
----------
widthA, widthB : float, default: 1.0
Width of the bracket.
lengthA, lengthB : float, default: 0.2
Length of the bracket.
angleA, angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA,
widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="|-|")
class BarAB(_Curve):
"""An arrow with vertical bars ``|`` at both ends."""
arrow = "|-|"
def __init__(self, widthA=1., angleA=0, widthB=1., angleB=0):
"""
Parameters
----------
widthA, widthB : float, default: 1.0
Width of the bracket.
angleA, angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
@_register_style(_style_list, name=']->')
class BracketCurve(_Curve):
"""
An arrow with an outward square bracket at its start and a head at
the end.
"""
arrow = "]->"
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
lengthA : float, default: 0.2
Length of the bracket.
angleA : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)
@_register_style(_style_list, name='<-[')
class CurveBracket(_Curve):
"""
An arrow with an outward square bracket at its end and a head at
the start.
"""
arrow = "<-["
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
Parameters
----------
widthB : float, default: 1.0
Width of the bracket.
lengthB : float, default: 0.2
Length of the bracket.
angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list)
class Simple(_Base):
"""A simple arrow. Only works with a quadratic Bézier curve."""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
Parameters
----------
head_length : float, default: 0.5
Length of the arrow head.
head_width : float, default: 0.5
Width of the arrow head.
tail_width : float, default: 0.2
Width of the arrow tail.
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super().__init__()
def transmute(self, path, mutation_size, linewidth):
# docstring inherited
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path, in_f)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
@_register_style(_style_list)
class Fancy(_Base):
    """A fancy arrow. Only works with a quadratic Bézier curve."""

    def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
        """
        Parameters
        ----------
        head_length : float, default: 0.4
            Length of the arrow head.
        head_width : float, default: 0.4
            Width of the arrow head.
        tail_width : float, default: 0.4
            Width of the arrow tail.
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super().__init__()

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
        # divide the path into a head and a tail
        head_length = self.head_length * mutation_size
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
        # path for head: split the spine where it enters the circle of
        # radius head_length centered on the arrow tip (x2, y2)
        in_f = inside_circle(x2, y2, head_length)
        try:
            path_out, path_in = split_bezier_intersecting_with_closedpath(
                arrow_path, in_f)
        except NonIntersectingPathException:
            # if this happens, make a straight line of the head_length
            # long.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
            path_head = arrow_path
        else:
            path_head = path_in
        # path for tail: a second split at 80 % of head_length yields the
        # portion of the spine used for the tail
        in_f = inside_circle(x2, y2, head_length * .8)
        path_out, path_in = split_bezier_intersecting_with_closedpath(
            arrow_path, in_f)
        path_tail = path_out
        # head
        head_width = self.head_width * mutation_size
        head_l, head_r = make_wedged_bezier2(path_head,
                                             head_width / 2.,
                                             wm=.6)
        # tail
        tail_width = self.tail_width * mutation_size
        tail_left, tail_right = make_wedged_bezier2(path_tail,
                                                    tail_width * .5,
                                                    w1=1., wm=0.6, w2=0.3)
        # locate the tail's starting point by splitting near (x0, y0)
        in_f = inside_circle(x0, y0, tail_width * .3)
        path_in, path_out = split_bezier_intersecting_with_closedpath(
            arrow_path, in_f)
        tail_start = path_in[-1]
        head_right, head_left = head_r, head_l
        # trace one side tail->head, around the tip, back down the other side
        patch_path = [(Path.MOVETO, tail_start),
                      (Path.LINETO, tail_right[0]),
                      (Path.CURVE3, tail_right[1]),
                      (Path.CURVE3, tail_right[2]),
                      (Path.LINETO, head_right[0]),
                      (Path.CURVE3, head_right[1]),
                      (Path.CURVE3, head_right[2]),
                      (Path.CURVE3, head_left[1]),
                      (Path.CURVE3, head_left[0]),
                      (Path.LINETO, tail_left[2]),
                      (Path.CURVE3, tail_left[1]),
                      (Path.CURVE3, tail_left[0]),
                      (Path.LINETO, tail_start),
                      (Path.CLOSEPOLY, tail_start),
                      ]
        path = Path([p for c, p in patch_path], [c for c, p in patch_path])
        return path, True
@_register_style(_style_list)
class Wedge(_Base):
    """
    Wedge(?) shape. Only works with a quadratic Bézier curve. The
    start point has a width of the *tail_width* and the end point has a
    width of 0. At the middle, the width is *shrink_factor*x*tail_width*.
    """

    def __init__(self, tail_width=.3, shrink_factor=0.5):
        """
        Parameters
        ----------
        tail_width : float, default: 0.3
            Width of the tail.
        shrink_factor : float, default: 0.5
            Fraction of the arrow width at the middle point.
        """
        self.tail_width = tail_width
        self.shrink_factor = shrink_factor
        super().__init__()

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
        # Fatten the spine into two offset Bézier curves, one per side.
        spine = [(x0, y0), (x1, y1), (x2, y2)]
        side_a, side_b = make_wedged_bezier2(
            spine,
            self.tail_width * mutation_size / 2.,
            wm=self.shrink_factor)
        # Walk up one side of the wedge and back down the other.
        outline = [
            (Path.MOVETO, side_a[0]),
            (Path.CURVE3, side_a[1]),
            (Path.CURVE3, side_a[2]),
            (Path.LINETO, side_b[2]),
            (Path.CURVE3, side_b[1]),
            (Path.CURVE3, side_b[0]),
            (Path.CLOSEPOLY, side_b[0]),
        ]
        codes, verts = zip(*outline)
        return Path(list(verts), list(codes)), True
|
ArrowStyle
|
python
|
pypa__packaging
|
tests/test_tags.py
|
{
"start": 17476,
"end": 19748
}
|
class ____:
    """Tests for ``tags.android_platforms``."""

    @staticmethod
    def _expected(top_level, abi):
        # Tags run from ``top_level`` down to the minimum supported level 16.
        return [f"android_{level}_{abi}" for level in range(top_level, 15, -1)]

    def test_non_android(self) -> None:
        # Off-Android, any call missing an argument must raise TypeError.
        for kwargs in ({}, {"api_level": 18}, {"abi": "x86_64"}):
            with pytest.raises(TypeError):
                list(tags.android_platforms(**kwargs))
        # The function can only be called on non-Android platforms if both
        # arguments are provided.
        result = list(tags.android_platforms(api_level=18, abi="x86_64"))
        assert result == self._expected(18, "x86_64")

    @pytest.mark.usefixtures("mock_android")
    def test_detection(self) -> None:
        assert list(tags.android_platforms()) == self._expected(21, "arm64_v8a")

    def test_api_level(self) -> None:
        # API levels below the minimum should return nothing.
        for too_low in (14, 15):
            assert list(tags.android_platforms(api_level=too_low, abi="x86")) == []
        # At or above the minimum, one tag per level down to 16.
        for level in (16, 17, 18):
            assert list(
                tags.android_platforms(api_level=level, abi="x86")
            ) == self._expected(level, "x86")

    def test_abi(self) -> None:
        # Real ABI, normalized.
        assert list(tags.android_platforms(api_level=16, abi="armeabi_v7a")) == [
            "android_16_armeabi_v7a",
        ]
        # Real ABI, not normalized.
        assert list(tags.android_platforms(api_level=16, abi="armeabi-v7a")) == [
            "android_16_armeabi_v7a",
        ]
        # Nonexistent ABIs should still be accepted and normalized.
        assert list(tags.android_platforms(api_level=16, abi="myarch-4.2")) == [
            "android_16_myarch_4_2",
        ]
|
TestAndroidPlatforms
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform_test.py
|
{
"start": 164,
"end": 2998
}
|
class ____(testing.TestCase):
    """Tests for ``layers.RandomElasticTransform``."""

    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            layers.RandomElasticTransform,
            init_kwargs={
                "factor": 1.0,
                "scale": 0.5,
                "interpolation": "bilinear",
                "fill_mode": "reflect",
                "fill_value": 0,
                "value_range": (0, 255),
                "seed": 1,
            },
            input_shape=(8, 3, 4, 3),
            supports_masking=False,
            expected_output_shape=(8, 3, 4, 3),
            run_training_check=False,
        )

    def test_random_elastic_transform_inference(self):
        # In inference mode the layer must be the identity.
        seed = 3481
        layer = layers.RandomElasticTransform()
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs, training=False)
        self.assertAllClose(inputs, output)

    def test_random_elastic_transform_no_op(self):
        # factor=0 disables the transform entirely.
        seed = 3481
        layer = layers.RandomElasticTransform(factor=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)
        # scale=0 likewise produces no displacement.
        layer = layers.RandomElasticTransform(scale=0)
        np.random.seed(seed)
        inputs = np.random.randint(0, 255, size=(224, 224, 3))
        output = layer(inputs)
        self.assertAllClose(inputs, output)

    def test_random_elastic_transform_basic(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            inputs = np.zeros((8, 8, 1))
            inputs[3:5, 3:5, :] = 1.0
        else:
            inputs = np.zeros((1, 8, 8))
            inputs[:, 3:5, 3:5] = 1.0
        layer = layers.RandomElasticTransform(data_format=data_format)
        transformation = {
            "apply_transform": np.array([True]),
            "distortion_factor": np.float32(0.9109325),
            "seed": 42,
        }
        output = layer.transform_images(inputs, transformation)
        # The transform must actually move pixels but keep the shape.
        self.assertNotAllClose(inputs, output)
        self.assertEqual(inputs.shape, output.shape)

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3))
        else:
            input_data = np.random.random((2, 3, 8, 8))
        layer = layers.RandomElasticTransform(data_format=data_format)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            # Fixed: the original left debugging ``print`` calls here and
            # asserted nothing. Verify the layer runs inside tf.data,
            # preserves the batched shape, and can be materialized eagerly.
            self.assertEqual(tuple(output.shape), input_data.shape)
            output.numpy()
|
RandomElasticTransformTest
|
python
|
keon__algorithms
|
tests/test_matrix.py
|
{
"start": 12788,
"end": 13188
}
|
class ____(unittest.TestCase):
    """Tests for ``sort_matrix_diagonally.sort_diagonally``."""

    def test_sort_diagonally(self):
        matrix = [
            [3, 3, 1, 1],
            [2, 2, 1, 2],
            [1, 1, 1, 2],
        ]
        expected = [
            [1, 1, 1, 1],
            [1, 2, 2, 2],
            [1, 2, 3, 3],
        ]
        self.assertEqual(
            sort_matrix_diagonally.sort_diagonally(matrix), expected
        )


if __name__ == "__main__":
    unittest.main()
|
TestSortMatrixDiagonally
|
python
|
django__django
|
django/contrib/staticfiles/finders.py
|
{
"start": 1188,
"end": 5125
}
|
class ____(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """

    def __init__(self, app_names=None, *args, **kwargs):
        # ``app_names`` is accepted for signature compatibility with other
        # finders and is unused here.
        # List of (prefix, root) locations with static files.
        self.locations = []
        # Maps dir paths to an appropriate storage instance.
        self.storages = {}
        for root in settings.STATICFILES_DIRS:
            # Each entry is either a plain path or a (prefix, path) pair.
            if isinstance(root, (list, tuple)):
                prefix, root = root
            else:
                prefix = ""
            # Deduplicate so each location is only searched once.
            if (prefix, root) not in self.locations:
                self.locations.append((prefix, root))
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super().__init__(*args, **kwargs)

    def check(self, **kwargs):
        # System-check validation of the STATICFILES_DIRS setting; returns a
        # list of Error/Warning objects.
        errors = []
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            errors.append(
                Error(
                    "The STATICFILES_DIRS setting is not a tuple or list.",
                    hint="Perhaps you forgot a trailing comma?",
                    id="staticfiles.E001",
                )
            )
            # Setting is malformed; per-entry checks are impossible.
            return errors
        for root in settings.STATICFILES_DIRS:
            if isinstance(root, (list, tuple)):
                prefix, root = root
                if prefix.endswith("/"):
                    errors.append(
                        Error(
                            "The prefix %r in the STATICFILES_DIRS setting must "
                            "not end with a slash." % prefix,
                            id="staticfiles.E003",
                        )
                    )
            if settings.STATIC_ROOT and os.path.abspath(
                settings.STATIC_ROOT
            ) == os.path.abspath(root):
                errors.append(
                    Error(
                        "The STATICFILES_DIRS setting should not contain the "
                        "STATIC_ROOT setting.",
                        id="staticfiles.E002",
                    )
                )
            if not os.path.isdir(root):
                errors.append(
                    Warning(
                        f"The directory '{root}' in the STATICFILES_DIRS setting "
                        f"does not exist.",
                        id="staticfiles.W004",
                    )
                )
        return errors

    def find(self, path, find_all=False):
        """
        Look for files in the extra locations as defined in STATICFILES_DIRS.

        Returns the first matching absolute path, or a list of all matches
        when ``find_all`` is true (empty list if nothing matched).
        """
        matches = []
        for prefix, root in self.locations:
            # Record every searched root (module-level bookkeeping).
            if root not in searched_locations:
                searched_locations.append(root)
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not find_all:
                    return matched_path
                matches.append(matched_path)
        return matches

    def find_location(self, root, path, prefix=None):
        """
        Find a requested static file in a location and return the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            prefix = "%s%s" % (prefix, os.sep)
            # A prefixed location only serves paths under that prefix.
            if not path.startswith(prefix):
                return None
            path = path.removeprefix(prefix)
        path = safe_join(root, path)
        if os.path.exists(path):
            return path

    def list(self, ignore_patterns):
        """
        List all files in all locations.

        Yields (relative_path, storage) pairs.
        """
        for prefix, root in self.locations:
            # Skip nonexistent directories.
            if os.path.isdir(root):
                storage = self.storages[root]
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage
|
FileSystemFinder
|
python
|
encode__django-rest-framework
|
tests/test_throttling.py
|
{
"start": 686,
"end": 775
}
|
class ____(UserRateThrottle):
    # Rate of 3 requests per second, with throttle state tracked under the
    # dedicated "seconds" scope.
    rate = '3/sec'
    scope = 'seconds'
|
User3SecRateThrottle
|
python
|
dask__dask
|
dask/order.py
|
{
"start": 2010,
"end": 33619
}
|
class ____(NamedTuple):
    # Ordering result for a single key.
    # priority: position of the key in the computed execution order
    # (lower runs first).
    priority: int
    # critical_path: counter of the critical-path iteration during which the
    # key was ordered; keys executed via process_runnables carry a 0.5 offset.
    critical_path: float | int
@overload
def order(
dsk: Mapping[Key, Any],
dependencies: Mapping[Key, set[Key]] | None = None,
*,
return_stats: Literal[True],
) -> dict[Key, Order]: ...
@overload
def order(
dsk: Mapping[Key, Any],
dependencies: Mapping[Key, set[Key]] | None = None,
*,
return_stats: Literal[False] = False,
) -> dict[Key, int]: ...
def order(
dsk: Mapping[Key, Any],
dependencies: Mapping[Key, set[Key]] | None = None,
*,
return_stats: bool = False,
) -> dict[Key, Order] | dict[Key, int]:
"""Order nodes in dask graph
This produces an ordering over our tasks that we use to break ties when
executing. We do this ahead of time to reduce a bit of stress on the
scheduler and also to assist in static analysis.
This currently traverses the graph as a single-threaded scheduler would
traverse it.
Examples
--------
>>> inc = lambda x: x + 1
>>> add = lambda x, y: x + y
>>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
>>> order(dsk)
{'a': 0, 'c': 1, 'b': 2, 'd': 3}
"""
if not dsk:
return {}
dsk = dict(dsk)
dependencies = DependenciesMapping(dsk)
dependents = reverse_dict(dependencies)
external_keys = set()
if len(dependents) != len(dependencies):
# There are external keys. Let's add a DataNode to ensure the algo below
# is encountering a complete graph. Those artificial data nodes will be
# ignored when assigning results
for k in dependents:
if k not in dependencies:
external_keys.add(k)
dsk[k] = DataNode(k, object())
expected_len = len(dsk)
leaf_nodes = {k for k, v in dependents.items() if not v}
root_nodes = {k for k, v in dependencies.items() if not v}
result: dict[Key, Order | int] = {}
# Normalize the graph by removing leaf nodes that are not actual tasks, see
# for instance da.store where the task is merely an alias
# to multiple keys, i.e. [key1, key2, ...,]
# Similarly, we are removing root nodes that are pure data tasks. Those task
# are embedded in the run_spec of a task and are not runnable. We have to
# assign a priority but their priority has no impact on performance.
# The removal of those tasks typically transforms the graph topology in a
# way that is simpler to handle
all_tasks = False
n_removed_leaves = 0
requires_data_task = defaultdict(set)
while not all_tasks:
all_tasks = True
for leaf in list(leaf_nodes):
if leaf in root_nodes:
continue
if (
not istask(dsk[leaf])
# Having a linear chain is fine
and len(dependencies[leaf]) > 1
):
all_tasks = False
# Put non-tasks at the very end since they are merely aliases
# and have no impact on performance at all
prio = len(dsk) - 1 - n_removed_leaves
if return_stats:
result[leaf] = Order(prio, -1)
else:
result[leaf] = prio
n_removed_leaves += 1
leaf_nodes.remove(leaf)
for dep in dependencies[leaf]:
dependents[dep].remove(leaf)
if not dependents[dep]:
leaf_nodes.add(dep)
del dsk[leaf]
del dependencies[leaf]
del dependents[leaf]
for root in list(root_nodes):
if root in leaf_nodes:
continue
deps_root = dependents[root]
if not istask(dsk[root]) and len(deps_root) > 1:
del dsk[root]
del dependencies[root]
root_nodes.remove(root)
del dependents[root]
for dep in deps_root:
requires_data_task[dep].add(root)
if not dependencies[dep]:
root_nodes.add(dep)
num_needed, total_dependencies = ndependencies(dependencies, dependents)
if len(total_dependencies) != len(dsk):
cycle = getcycle(dsk, None)
raise RuntimeError(
"Cycle detected between the following keys:\n -> {}".format(
"\n -> ".join(str(x) for x in cycle)
)
)
assert dependencies is not None
roots_connected, max_dependents = _connecting_to_roots(dependencies, dependents)
leafs_connected, _ = _connecting_to_roots(dependents, dependencies)
i = 0
runnable_hull = set()
reachable_hull = set()
runnable: list[Key] = []
known_runnable_paths: dict[Key, list[list[Key]]] = {}
known_runnable_paths_pop = known_runnable_paths.pop
crit_path_counter = 0
scrit_path: set[Key] = set()
_crit_path_counter_offset: int | float = 0
_sort_keys_cache: dict[Key, tuple[int, int, int, int, str]] = {}
def sort_key(x: Key) -> tuple[int, int, int, int, str]:
try:
return _sort_keys_cache[x]
except KeyError:
assert dependencies is not None
_sort_keys_cache[x] = rv = (
total_dependencies[x],
len(dependencies[x]),
len(roots_connected[x]),
-max_dependents[x],
# Converting to str is actually faster than creating some
# wrapper class and comparisons that come this far are
# relatively rare so we prefer fast init over fast comparison
str(x),
)
return rv
def add_to_result(item: Key) -> None:
# Earlier versions recursed into this method but this could cause
# recursion depth errors. This is the only reason for the while loop
next_items = [item]
nonlocal i
nonlocal min_leaf_degree # type: ignore[misc]
while next_items:
item = next_items.pop()
runnable_hull.discard(item)
reachable_hull.discard(item)
leaf_nodes.discard(item)
if item in result:
continue
while requires_data_task[item]:
add_to_result(requires_data_task[item].pop())
if return_stats:
result[item] = Order(i, crit_path_counter - _crit_path_counter_offset)
else:
result[item] = i
if item not in external_keys:
i += 1
if item in root_nodes:
for leaf in leafs_connected[item]:
if leaf in leaf_nodes:
degree = leafs_degree.pop(leaf)
leafs_per_degree[degree].remove(leaf)
new_degree = degree - 1
if new_degree > 0:
if new_degree < min_leaf_degree:
min_leaf_degree = new_degree
leafs_per_degree[new_degree].add(leaf)
leafs_degree[leaf] = new_degree
elif not leafs_per_degree[degree]:
assert degree == min_leaf_degree
while min_leaf_degree != max_leaf_degree and (
min_leaf_degree not in leafs_per_degree
or not leafs_per_degree[min_leaf_degree]
):
min_leaf_degree += 1
# Note: This is a `set` and therefore this introduces a certain
# randomness. However, this randomness should not have any impact on
# the final result since the `process_runnable` should produce
# equivalent results regardless of the order in which runnable is
# populated (not identical but equivalent)
for dep in dependents.get(item, ()):
num_needed[dep] -= 1
reachable_hull.add(dep)
if not num_needed[dep]:
if len(dependents[item]) == 1:
next_items.append(dep)
else:
runnable.append(dep)
def _with_offset(func: Callable[..., None]) -> Callable[..., None]:
# This decorator is only used to reduce indentation levels. The offset
# is purely cosmetical and used for some visualizations and I haven't
# settled on how to implement this best so I didn't want to have large
# indentations that make things harder to read
def wrapper(*args: Any, **kwargs: Any) -> None:
nonlocal _crit_path_counter_offset
_crit_path_counter_offset = 0.5
try:
func(*args, **kwargs)
finally:
_crit_path_counter_offset = 0
return wrapper
@_with_offset
def process_runnables() -> None:
"""Compute all currently runnable paths and either cache or execute them
This is designed to ensure we are running tasks that are free to execute
(e.g. the result of a splitter task) not too eagerly. If we executed
such free tasks too early we'd be walking the graph in a too wide /
breadth first fashion that is not optimal. If instead we were to only
execute them once they are needed for a final result, this can cause
very high memory pressure since valuable reducers are executed too
late.
The strategy here is to take all runnable tasks and walk forwards until
we hit a reducer node (i.e. a node with more than one dependency). We
will remember/cache the path to this reducer node.
If this path leads to a leaf or if we find enough runnable paths for a
reducer to be runnable, we will execute the path.
If instead of a reducer a splitter is encountered that is runnable, we
will follow its splitter paths individually and apply the same logic to
each branch.
"""
while runnable:
candidates = runnable.copy()
runnable.clear()
while candidates:
key = candidates.pop()
if key in runnable_hull or key in result:
continue
if key in leaf_nodes:
add_to_result(key)
continue
path = [key]
branches = deque([(0, path)])
while branches:
nsplits, path = branches.popleft()
while True:
# Loop invariant. Too expensive to compute at runtime
# assert not set(known_runnable_paths).intersection(runnable_hull)
current = path[-1]
runnable_hull.add(current)
deps_downstream = dependents[current]
deps_upstream = dependencies[current]
if not deps_downstream:
# FIXME: The fact that it is possible for
# num_needed[current] == 0 means we're doing some
# work twice
if num_needed[current] <= 1:
for k in path:
add_to_result(k)
else:
runnable_hull.discard(current)
elif len(path) == 1 or len(deps_upstream) == 1:
if len(deps_downstream) > 1:
nsplits += 1
for d in sorted(deps_downstream, key=sort_key):
# This ensures we're only considering splitters
# that are genuinely splitting and not
# interleaving
if len(dependencies[d]) == 1:
branch = path.copy()
branch.append(d)
branches.append((nsplits, branch))
break
path.extend(deps_downstream)
continue
elif current in known_runnable_paths:
known_runnable_paths[current].append(path)
runnable_hull.discard(current)
if (
len(known_runnable_paths[current])
>= num_needed[current]
):
pruned_branches: deque[list[Key]] = deque()
for path in known_runnable_paths_pop(current):
if path[-2] not in result:
pruned_branches.append(path)
if len(pruned_branches) < num_needed[current]:
known_runnable_paths[current] = list(
pruned_branches
)
else:
if nsplits > 1:
path = []
for pruned in pruned_branches:
path.extend(pruned)
branches.append((nsplits - 1, path))
break
while pruned_branches:
path = pruned_branches.popleft()
for k in path:
if num_needed[k]:
pruned_branches.append(path)
break
add_to_result(k)
elif (
len(dependencies[current]) > 1 and num_needed[current] <= 1
):
for k in path:
add_to_result(k)
else:
known_runnable_paths[current] = [path]
runnable_hull.discard(current)
break
# Pick strategy
# Note: We're trying to be smart here by picking a strategy on how to
# determine the critical path. This is not always clear and we may want to
# consider just calculating both orderings and picking the one with less
# pressure. The only concern to this would be performance but at time of
# writing, the most expensive part of ordering is the prep work (mostly
# connected roots + sort_key) which can be reused for multiple orderings.
# Degree in this context is the number root nodes that have to be loaded for
# this leaf to become accessible. Zero means the leaf is already accessible
# in which case it _should_ either already be in result or be accessible via
# process_runnables
# When picking a new target, we prefer the leafs with the least number of
# roots that need loading.
leafs_degree = {}
leafs_per_degree = defaultdict(set)
min_leaf_degree = len(dsk)
max_leaf_degree = len(dsk)
for leaf in leaf_nodes - root_nodes:
degree = len(roots_connected[leaf])
min_leaf_degree = min(min_leaf_degree, degree)
max_leaf_degree = max(max_leaf_degree, degree)
leafs_degree[leaf] = degree
leafs_per_degree[degree].add(leaf)
def get_target() -> Key:
# If we're already mid run and there is a runnable_hull we'll attempt to
# pick the next target in a way that minimizes the number of additional
# root nodes that are needed
all_leafs_accessible = min_leaf_degree == max_leaf_degree
is_trivial_lookup = not reachable_hull or all_leafs_accessible
if not is_trivial_lookup:
candidates = reachable_hull & leafs_per_degree[min_leaf_degree]
if not candidates:
candidates = leafs_per_degree[min_leaf_degree]
# Even without reachable hull overlap this should be relatively
# small so one full pass should be fine
return min(candidates, key=sort_key)
else:
return leaf_nodes_sorted.pop()
def use_longest_path() -> bool:
size = 0
# Heavy reducer / splitter topologies often benefit from a very
# traditional critical path that expresses the longest chain of
# tasks.
if abs(len(root_nodes) - len(leaf_nodes)) / len(root_nodes) < 0.8:
# If the graph stays about the same, we are checking for symmetry
# and choose a "quickest path first" approach if the graph appears
# to be asymmetrical
for r in root_nodes:
if not size:
size = len(leafs_connected[r])
elif size != len(leafs_connected[r]):
return False
return True
# Some topologies benefit if the node with the most dependencies
# is used as first choice, others benefit from the opposite.
longest_path = use_longest_path()
leaf_nodes_sorted = sorted(leaf_nodes, key=sort_key, reverse=not longest_path)
# *************************************************************************
# CORE ALGORITHM STARTS HERE
#
# 0. Nomenclature
#
# - roots: Nodes that have no dependencies (i.e. typically data producers)
# - leafs: Nodes that have no dependents (i.e. user requested keys)
# - critical_path: The strategic path through the graph.
# - walking forwards: Starting from a root node we walk the graph as if we
# were to compute the individual nodes, i.e. along dependents
# - walking backwards: Starting from a leaf node we walk the graph in
# reverse direction, i.e. along dependencies
# - runnable: Nodes that are ready to be computed because all their
# dependencies are in result
# - runnable_hull: Nodes that could be reached and executed without
# "walking back". This typically means that these are tasks than can be
# executed without loading additional data/executing additional root
# nodes
# - reachable_hull: Nodes that are touching the result, i.e. all nodes in
# reachable_hull have at least one dependency in result
#
# A. Build the critical path
#
# To build the critical path we will use a provided `get_target` function
# that returns a node that is anywhere in the graph, typically a leaf
# node. This node is not required to be runnable. We will walk the graph
# backwards, i.e. from leafs to roots and append nodes to the graph as we
# go. The critical path is a
# linear path in the graph. While this is a viable strategy, it is not
# required for the critical path to be a classical "longest path" but it
# can define any route through the graph that should be considered as top
# priority.
#
# 1. Determine the target node by calling ``get_target`` and append the
# target to the critical path stack
# 2. Take the _most valuable_ (max given a `sort_key`) of its dependents
# and append it to the critical path stack. This key is the new target.
# 3. Repeat step 2 until we reach a node that has no dependencies and is
# therefore runnable
#
# B. Walk the critical path
#
# Only the first element of the critical path is an actually runnable node
# and this is where we're starting the sort. Strategically, this is the
# most important goal to achieve but since not all of the nodes are
# immediately runnable we have to walk back and compute other nodes first
# before we can unlock the critical path. This typically requires us also
# to load more data / run more root tasks.
# While walking the critical path we will also unlock non-critical tasks
# that could be run but are not contributing to our strategic goal. Under
# certain circumstances, those runnable tasks are allowed to be run right
# away to reduce memory pressure. This is described in more detail in
# `process_runnable`.
# Given this, the algorithm is as follows:
#
# 1. Pop the first element of the critical path
# 2a. If the node is already in the result, continue
# 2b. If the node is not runnable, we will put it back on the stack and
# put all its dependencies on the stack and continue with step 1. This
# is what we refer to as "walking back"
# 2c. Else, we add the node to the result
# 3. If we previously had to walk back we will consider running
# non-critical tasks (by calling process_runnables)
# 4a. If critical path is not empty, repeat step 1
# 4b. Go back to A.) and build a new critical path given a new target that
# accounts for the already computed nodes.
#
# *************************************************************************
critical_path: list[Key] = []
cpath_append = critical_path.append
scpath_add = scrit_path.add
def path_append(item: Key) -> None:
cpath_append(item)
scpath_add(item)
scpath_update = scrit_path.update
cpath_extend = critical_path.extend
def path_extend(items: Iterable[Key]) -> None:
cpath_extend(items)
scpath_update(items)
cpath_pop = critical_path.pop
scpath_discard = scrit_path.discard
def path_pop() -> Key:
item = cpath_pop()
scpath_discard(item)
return item
while len(result) < expected_len:
crit_path_counter += 1
assert not critical_path
assert not scrit_path
# A. Build the critical path
target = get_target()
next_deps = dependencies[target]
path_append(target)
while next_deps:
item = max(next_deps, key=sort_key)
path_append(item)
next_deps = dependencies[item].difference(result)
path_extend(next_deps)
# B. Walk the critical path
walked_back = False
while critical_path:
item = path_pop()
if item in result:
continue
if num_needed[item]:
path_append(item)
deps = dependencies[item].difference(result)
unknown: list[Key] = []
known: list[Key] = []
k_append = known.append
uk_append = unknown.append
for d in sorted(deps, key=sort_key):
if d in known_runnable_paths:
k_append(d)
else:
uk_append(d)
if len(unknown) > 1:
walked_back = True
for d in unknown:
path_append(d)
for d in known:
for path in known_runnable_paths_pop(d):
path_extend(reversed(path))
del deps
continue
else:
if walked_back and len(runnable) < len(critical_path):
process_runnables()
add_to_result(item)
process_runnables()
assert len(result) == expected_len
for k in external_keys:
del result[k]
return result # type: ignore[return-value]
def _connecting_to_roots(
    dependencies: Mapping[Key, set[Key]], dependents: Mapping[Key, set[Key]]
) -> tuple[dict[Key, frozenset[Key]], dict[Key, int]]:
    """Determine for every node which root nodes are connected to it (i.e.
    ancestors). If arguments of dependencies and dependents are switched, this
    can also be used to determine which leaf nodes are connected to which node
    (i.e. descendants).

    Also computes a weight that is defined as (cheaper to compute here)

    `max(len(dependents[k]) for k in connected_roots[key])`
    """
    result = {}
    current = []
    # Remaining unresolved dependencies per key; drained in a Kahn-style
    # sweep so every key is visited only after all its dependencies.
    num_needed = {k: len(v) for k, v in dependencies.items() if v}
    max_dependents = {}
    roots = set()
    for k, v in dependencies.items():
        if not v:
            # Note: Hashing the full keys is relatively expensive. Hashing
            # integers would be much faster so this could be sped up by just
            # introducing a counter here. However, the order algorithm is also
            # sometimes interested in the actual keys and the only way to
            # benefit from the speedup of using integers would be to convert
            # this back on demand which makes the code very hard to read.
            roots.add(k)
            result[k] = frozenset({k})
            deps = dependents[k]
            max_dependents[k] = len(deps)
            for child in deps:
                num_needed[child] -= 1
                if not num_needed[child]:
                    current.append(child)
    # Deduplicate equal frozensets so identical ancestor sets share one
    # object (also enables the id()-based fast path below).
    dedup_mapping: dict[frozenset[Key], frozenset[Key]] = {}
    while current:
        key = current.pop()
        if key in result:
            continue
        for parent in dependents[key]:
            num_needed[parent] -= 1
            if not num_needed[parent]:
                current.append(parent)
        # At some point, all the roots are the same, particularly for dense
        # graphs. We don't want to create new sets over and over again
        transitive_deps = []
        transitive_deps_ids = set()
        max_dependents_key = list()
        for child in dependencies[key]:
            r_child = result[child]
            # id()-based dedup: shared frozenset objects are skipped cheaply.
            if id(r_child) in transitive_deps_ids:
                continue
            transitive_deps.append(r_child)
            transitive_deps_ids.add(id(r_child))
            max_dependents_key.append(max_dependents[child])
        max_dependents[key] = max(max_dependents_key)
        if len(transitive_deps_ids) == 1:
            result[key] = transitive_deps[0]
        else:
            d = transitive_deps[0]
            if all(tdeps.issubset(d) for tdeps in transitive_deps[1:]):
                result[key] = d
            else:
                res = set(d)
                for tdeps in transitive_deps[1:]:
                    res.update(tdeps)
                # frozenset is unfortunately triggering a copy. In the event of
                # a cache hit, this is wasted time but we can't hash the set
                # otherwise (unless we did it manually) and can therefore not
                # deduplicate without this copy
                frozen_res = frozenset(res)
                del res, tdeps
                try:
                    result[key] = dedup_mapping[frozen_res]
                except KeyError:
                    dedup_mapping[frozen_res] = frozen_res
                    result[key] = frozen_res
    del dedup_mapping

    # Roots themselves report no ancestors.
    empty_set: frozenset[Key] = frozenset()
    for r in roots:
        result[r] = empty_set
    return result, max_dependents
def ndependencies(
    dependencies: Mapping[Key, set[Key]], dependents: Mapping[Key, set[Key]]
) -> tuple[dict[Key, int], dict[Key, int]]:
    """Number of total data elements on which this key depends

    For each key we return the number of tasks that must be run for us to run
    this task.

    Examples
    --------
    >>> inc = lambda x: x + 1
    >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
    >>> dependencies, dependents = get_deps(dsk)
    >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents)
    >>> sorted(total_dependencies.items())
    [('a', 1), ('b', 2), ('c', 3)]

    Returns
    -------
    num_dependencies: Dict[key, int]
    total_dependencies: Dict[key, int]
    """
    # Unresolved-dependency counters, drained in a Kahn-style sweep from the
    # roots towards the leafs.
    pending = {key: len(deps) for key, deps in dependencies.items()}
    # Direct dependency counts are simply the initial counter values.
    num_dependencies = dict(pending)
    # Roots count themselves only.
    total = {key: 1 for key, npending in pending.items() if not npending}
    stack: list[Key] = []
    for key in total:
        for parent in dependents[key]:
            pending[parent] -= 1
            if not pending[parent]:
                stack.append(parent)
    while stack:
        key = stack.pop()
        # Totals accumulate per path, so diamonds count shared ancestors
        # once per path through them.
        total[key] = 1 + sum(total[child] for child in dependencies[key])
        for parent in dependents[key]:
            pending[parent] -= 1
            if not pending[parent]:
                stack.append(parent)
    return num_dependencies, total
# Per-key runtime metrics produced by ``diagnostics``; see that function's
# docstring for the meaning of each field.
OrderInfo = namedtuple(
    "OrderInfo",
    (
        "order",
        "age",
        "num_data_when_run",
        "num_data_when_released",
        "num_dependencies_freed",
    ),
)
def diagnostics(
    dsk: MutableMapping[Key, Any],
    o: Mapping[Key, int] | None = None,
    dependencies: MutableMapping[Key, set[Key]] | None = None,
) -> tuple[dict[Key, OrderInfo], list[int]]:
    """Simulate runtime metrics as though running tasks one at a time in order.

    These diagnostics can help reveal behaviors of and issues with ``order``.

    Returns a dict of `namedtuple("OrderInfo")` and a list of the number of
    outputs held over time.

    OrderInfo fields:
    - order : the order in which the node is run.
    - age : how long the output of a node is held.
    - num_data_when_run : the number of outputs held in memory when a node is run.
    - num_data_when_released : the number of outputs held in memory when the output is released.
    - num_dependencies_freed : the number of dependencies freed by running the node.
    """
    if dependencies is None:
        dependencies, dependents = get_deps(dsk)
    else:
        dependents = reverse_dict(dependencies)
    assert dependencies is not None
    if o is None:
        o = order(dsk, dependencies=dependencies, return_stats=False)

    pressure = []          # number of outputs in memory just before each step
    in_memory = 0
    age = {}               # key -> steps its output stayed in memory
    run_pressure = {}      # key -> outputs held when the key ran
    release_pressure = {}  # key -> outputs held when the key's output was freed
    freed_counts = {}      # key -> dependencies released by running the key
    # remaining_uses[k]: dependents of k that have not yet consumed its output
    remaining_uses = {key: len(deps) for key, deps in dependents.items()}

    for step, key in enumerate(sorted(dsk, key=o.__getitem__)):
        pressure.append(in_memory)
        run_pressure[key] = in_memory
        n_released = 0
        for dep in dependencies[key]:
            remaining_uses[dep] -= 1
            if remaining_uses[dep] == 0:
                # Last consumer of `dep` just ran; its output can be dropped.
                age[dep] = step - o[dep]
                release_pressure[dep] = in_memory
                n_released += 1
        freed_counts[key] = n_released
        if dependents[key]:
            # Our own output enters memory while the released ones leave.
            in_memory += 1 - n_released
        else:
            # Terminal output: released immediately after being produced.
            age[key] = 0
            release_pressure[key] = in_memory
            in_memory -= n_released

    info = {
        key: OrderInfo(
            pos, age[key], run_pressure[key], release_pressure[key], freed_counts[key]
        )
        for key, pos in o.items()
    }
    return info, pressure
def _f() -> None: ...
def sanitize_dsk(dsk: MutableMapping[Key, Any]) -> dict:
    """Take a dask graph and replace callables with a dummy function and remove
    payload data like numpy arrays, dataframes, etc.
    """
    from dask._task_spec import Task, TaskRef

    sanitized = {}
    for key, dep_keys in DependenciesMapping(dsk).items():
        # Keep only the dependency structure: a dummy task per key whose
        # arguments are references to its original dependencies.
        sanitized[key] = Task(key, _f, *map(TaskRef, dep_keys))
    return sanitized
|
Order
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.