language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 122725,
"end": 123661
} | class ____:
def test_sia(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "sia.pem"),
x509.load_pem_x509_certificate,
)
ext = cert.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_INFORMATION_ACCESS
)
assert ext is not None
assert ext.critical is False
assert ext.value == x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("https://my.ca.issuer/"),
),
x509.AccessDescription(
x509.ObjectIdentifier("2.999.7"),
x509.UniformResourceIdentifier(
"gopher://info-mac-archive"
),
),
]
)
| TestSubjectInformationAccessExtension |
python | pytest-dev__pytest | testing/test_cacheprovider.py | {
"start": 43218,
"end": 43884
} | class ____:
def check_readme(self, pytester: Pytester) -> bool:
config = pytester.parseconfigure()
assert config.cache is not None
readme = config.cache._cachedir.joinpath("README.md")
return readme.is_file()
def test_readme_passed(self, pytester: Pytester) -> None:
pytester.makepyfile("def test_always_passes(): pass")
pytester.runpytest()
assert self.check_readme(pytester) is True
def test_readme_failed(self, pytester: Pytester) -> None:
pytester.makepyfile("def test_always_fails(): assert 0")
pytester.runpytest()
assert self.check_readme(pytester) is True
| TestReadme |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 62136,
"end": 62309
} | class ____:
xlFillWithAll = -4104 # from enum XlFillWith
xlFillWithContents = 2 # from enum XlFillWith
xlFillWithFormats = -4122 # from enum XlFillWith
| FillWith |
python | getsentry__sentry | src/sentry/issues/endpoints/browser_reporting_collector.py | {
"start": 1151,
"end": 1830
} | class ____(serializers.URLField):
"""
A URLField that allows longer URLs than Django's default 2048 character limit.
This is needed for browser reporting where URLs can be very long due to many query parameters.
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
# Replace the default URLValidator with our custom one
self.validators = [
LongURLValidator() if isinstance(v, URLValidator) else v for v in self.validators
]
# Working Draft https://www.w3.org/TR/reporting-1/#concept-reports
# Editor's Draft https://w3c.github.io/reporting/#concept-reports
# We need to support both
| LongURLField |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc_strides.py | {
"start": 4337,
"end": 4499
} | class ____(_AbstractUnary):
params = [
[np.reciprocal, np.absolute, np.square, np.conjugate],
[1, 2, 4], [1, 2, 4], ['F', 'D']
]
| UnaryComplex |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 10516,
"end": 12098
} | class ____(Pool):
"""Two-dimensional downsample using the maximum over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=-jnp.inf,
operation=lax.max,
num_spatial_dims=2,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
# Redefined to get them in the right order in docs
@named_scope("eqx.nn.MaxPool2d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(channels, dim_1, dim_2)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim_1, new_dim_2)`.
"""
return super().__call__(x)
| MaxPool2d |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 109396,
"end": 110484
} | class ____(Response):
"""
Response of events.scalar_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "scalar_metrics_iter_histogram"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(ScalarMetricsIterHistogramResponse, self).__init__(**kwargs)
self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_images = None
return
self.assert_isinstance(value, "images", (list, tuple))
self.assert_isinstance(value, "images", (dict,), is_array=True)
self._property_images = value
| ScalarMetricsIterHistogramResponse |
python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_user_notification_settings_options.py | {
"start": 500,
"end": 2551
} | class ____(UserNotificationSettingsOptionsBaseTest):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def test_simple(self) -> None:
other_user = self.create_user()
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.WORKFLOW.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
NotificationSettingOption.objects.create(
user_id=other_user.id,
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
response = self.get_success_response("me", type="alerts").data
assert len(response) == 1
assert response[0]["scopeType"] == "organization"
assert response[0]["scopeIdentifier"] == str(self.organization.id)
assert response[0]["user_id"] == str(self.user.id)
assert response[0]["team_id"] is None
assert response[0]["value"] == "always"
assert response[0]["type"] == "alerts"
response = self.get_success_response("me").data
assert len(response) == 2
def test_invalid_type(self) -> None:
response = self.get_error_response(
"me",
type="invalid",
status_code=status.HTTP_400_BAD_REQUEST,
)
assert response.data["type"] == ["Invalid type"]
@control_silo_test
| UserNotificationSettingsOptionsGetTest |
python | coleifer__peewee | tests/models.py | {
"start": 160743,
"end": 162474
} | class ____(ModelTestCase):
requires = [User, Tweet]
def test_lateral_join(self):
with self.database.atomic():
for i in range(3):
u = User.create(username='u%s' % i)
for j in range(4):
Tweet.create(user=u, content='u%s-t%s' % (i, j))
# GOAL: query users and their 2 most-recent tweets (by ID).
TA = Tweet.alias()
# The "outer loop" will be iterating over the users whose tweets we are
# trying to find.
user_query = (User
.select(User.id, User.username)
.order_by(User.id)
.alias('uq'))
# The inner loop will select tweets and is correlated to the outer loop
# via the WHERE clause. Note that we are using a LIMIT clause.
tweet_query = (TA
.select(TA.id, TA.content)
.where(TA.user == user_query.c.id)
.order_by(TA.id.desc())
.limit(2)
.alias('pq'))
join = NodeList((user_query, SQL('LEFT JOIN LATERAL'), tweet_query,
SQL('ON %s', [True])))
query = (Tweet
.select(user_query.c.username, tweet_query.c.content)
.from_(join)
.dicts())
self.assertEqual([row for row in query], [
{'username': 'u0', 'content': 'u0-t3'},
{'username': 'u0', 'content': 'u0-t2'},
{'username': 'u1', 'content': 'u1-t3'},
{'username': 'u1', 'content': 'u1-t2'},
{'username': 'u2', 'content': 'u2-t3'},
{'username': 'u2', 'content': 'u2-t2'}])
| TestLateralJoin |
python | django__django | django/db/models/expressions.py | {
"start": 60736,
"end": 63274
} | class ____(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
subquery = True
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.template = extra.pop("template", self.template)
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def resolve_expression(self, *args, **kwargs):
resolved = super().resolve_expression(*args, **kwargs)
if type(self) is Subquery and self.template == Subquery.template:
resolved.query.contains_subquery = True
# Subquery is an unnecessary shim for a resolved query as it
# complexifies the lookup's right-hand-side introspection.
try:
self.output_field
except AttributeError:
return resolved.query
if self.output_field and type(self.output_field) is not type(
resolved.query.output_field
):
return ExpressionWrapper(resolved.query, output_field=self.output_field)
return resolved.query
return resolved
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
subquery_sql, sql_params = self.query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self):
return self.query.get_group_by_cols(wrapper=self)
| Subquery |
python | django__django | tests/admin_views/models.py | {
"start": 23363,
"end": 23535
} | class ____(models.Model):
choice = models.IntegerField(
blank=True,
null=True,
choices=((1, "Yes"), (0, "No"), (None, "No opinion")),
)
| Choice |
python | facelessuser__pymdown-extensions | pymdownx/blocks/details.py | {
"start": 2329,
"end": 3864
} | class ____(BlocksExtension):
"""Admonition Blocks Extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
"types": [
[],
"Generate Admonition block extensions for the given types."
]
}
super().__init__(*args, **kwargs)
def extendMarkdownBlocks(self, md, block_mgr):
"""Extend Markdown blocks."""
block_mgr.register(Details, self.getConfigs())
# Generate an details subclass based on the given names.
for obj in self.getConfig('types', []):
if isinstance(obj, dict):
name = obj['name']
class_name = obj.get('class', name)
title = obj.get('title', RE_SEP.sub(' ', class_name).title())
else:
name = obj
class_name = name
title = RE_SEP.sub(' ', class_name).title()
subclass = RE_SEP.sub('', name).title()
block_mgr.register(
type(
subclass,
(Details,),
{
'OPTIONS': {'open': [False, type_boolean]},
'NAME': name,
'DEF_TITLE': title,
'DEF_CLASS': class_name
}
),
{}
)
def makeExtension(*args, **kwargs):
"""Return extension."""
return DetailsExtension(*args, **kwargs)
| DetailsExtension |
python | tiangolo__fastapi | tests/test_validate_response_dataclass.py | {
"start": 247,
"end": 1208
} | class ____:
name: str
price: Optional[float] = None
owner_ids: Optional[List[int]] = None
@app.get("/items/invalid", response_model=Item)
def get_invalid():
return {"name": "invalid", "price": "foo"}
@app.get("/items/innerinvalid", response_model=Item)
def get_innerinvalid():
return {"name": "double invalid", "price": "foo", "owner_ids": ["foo", "bar"]}
@app.get("/items/invalidlist", response_model=List[Item])
def get_invalidlist():
return [
{"name": "foo"},
{"name": "bar", "price": "bar"},
{"name": "baz", "price": "baz"},
]
client = TestClient(app)
def test_invalid():
with pytest.raises(ResponseValidationError):
client.get("/items/invalid")
def test_double_invalid():
with pytest.raises(ResponseValidationError):
client.get("/items/innerinvalid")
def test_invalid_list():
with pytest.raises(ResponseValidationError):
client.get("/items/invalidlist")
| Item |
python | Netflix__metaflow | metaflow/_vendor/packaging/version.py | {
"start": 1734,
"end": 4491
} | class ____:
_key: CmpKey
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
| _BaseVersion |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_webhook_requests.py | {
"start": 627,
"end": 18373
} | class ____(APITestCase):
def setUp(self) -> None:
self.superuser = self.create_user(email="superuser@example.com", is_superuser=True)
self.user = self.create_user(email="user@example.com")
self.org = self.create_organization(
owner=self.user,
region="us",
slug="test-org",
)
self.project = self.create_project(organization=self.org)
self.event_id = "d5111da2c28645c5889d072017e3445d"
self.published_app = self.create_sentry_app(
name="Published App", organization=self.org, published=True
)
self.unowned_published_app = self.create_sentry_app(
name="Unowned Published App", organization=self.create_organization(), published=True
)
self.unpublished_app = self.create_sentry_app(name="Unpublished App", organization=self.org)
self.unowned_unpublished_app = self.create_sentry_app(
name="Unowned Unpublished App", organization=self.create_organization()
)
self.internal_app = self.create_internal_integration(
name="Internal app", organization=self.org
)
self.create_sentry_app_installation(
organization=self.org, slug=self.published_app.slug, prevent_token_exchange=True
)
self.mock_response = Mock(spec=Response)
self.mock_response.content = '{"content": "mock response content"}'
self.mock_request = Mock()
self.mock_request.body = "mock request body"
self.mock_response.request = self.mock_request
@with_feature("organizations:sentry-app-webhook-requests")
def test_superuser_sees_unowned_published_requests(self) -> None:
self.login_as(user=self.superuser, superuser=True)
buffer = SentryAppWebhookRequestsBuffer(self.unowned_published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unowned_published_app.webhook_url,
)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unowned_published_app.webhook_url,
)
url = reverse(
"sentry-api-0-sentry-app-webhook-requests", args=[self.unowned_published_app.slug]
)
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 2
assert response.data[0]["organization"]["slug"] == self.org.slug
assert response.data[0]["sentryAppSlug"] == self.unowned_published_app.slug
assert response.data[0]["responseCode"] == 200
@with_feature("organizations:sentry-app-webhook-requests")
def test_superuser_sees_unpublished_stats(self) -> None:
self.login_as(user=self.superuser, superuser=True)
buffer = SentryAppWebhookRequestsBuffer(self.unowned_unpublished_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unowned_unpublished_app.webhook_url,
)
url = reverse(
"sentry-api-0-sentry-app-webhook-requests", args=[self.unowned_unpublished_app.slug]
)
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["sentryAppSlug"] == self.unowned_unpublished_app.slug
@with_feature("organizations:sentry-app-webhook-requests")
def test_user_sees_owned_published_requests(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["organization"]["slug"] == self.org.slug
assert response.data[0]["sentryAppSlug"] == self.published_app.slug
assert response.data[0]["responseCode"] == 200
@with_feature("organizations:sentry-app-webhook-requests")
def test_user_does_not_see_unowned_published_requests(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.unowned_published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unowned_published_app.webhook_url,
)
url = reverse(
"sentry-api-0-sentry-app-webhook-requests", args=[self.unowned_published_app.slug]
)
response = self.client.get(url, format="json")
assert response.status_code == 403
assert response.data["detail"] == "You do not have permission to perform this action."
@with_feature("organizations:sentry-app-webhook-requests")
def test_user_sees_owned_unpublished_requests(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.unpublished_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unpublished_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.unpublished_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 1
@with_feature("organizations:sentry-app-webhook-requests")
def test_internal_app_requests_does_not_have_organization_field(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.internal_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.internal_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.internal_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 1
assert "organization" not in response.data[0]
assert response.data[0]["sentryAppSlug"] == self.internal_app.slug
assert response.data[0]["responseCode"] == 200
@with_feature("organizations:sentry-app-webhook-requests")
def test_event_type_filter(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
buffer.add_request(
response_code=400,
org_id=self.org.id,
event="installation.created",
url=self.published_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response1 = self.client.get(f"{url}?eventType=issue.created", format="json")
assert response1.status_code == 200
assert len(response1.data) == 0
response2 = self.client.get(f"{url}?eventType=issue.assigned", format="json")
assert response2.status_code == 200
assert len(response2.data) == 1
assert response2.data[0]["sentryAppSlug"] == self.published_app.slug
assert response2.data[0]["responseCode"] == 200
response3 = self.client.get(f"{url}?eventType=installation.created", format="json")
assert response3.status_code == 200
assert len(response3.data) == 1
assert response3.data[0]["sentryAppSlug"] == self.published_app.slug
assert response3.data[0]["responseCode"] == 400
@with_feature("organizations:sentry-app-webhook-requests")
def test_invalid_event_type(self) -> None:
self.login_as(user=self.user)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response = self.client.get(f"{url}?eventType=invalid_type", format="json")
assert response.status_code == 400
@with_feature("organizations:sentry-app-webhook-requests")
def test_errors_only_filter(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
now = datetime.now()
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
with freeze_time(now):
buffer.add_request(
response_code=500,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
error_id="abc123",
project_id=1,
response=self.mock_response,
headers={
"Content-Type": "application/json",
},
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
errors_only_response = self.client.get(f"{url}?errorsOnly=true", format="json")
assert errors_only_response.status_code == 200
assert len(errors_only_response.data) == 1
assert errors_only_response.data[0] == {
"webhookUrl": self.published_app.webhook_url,
"sentryAppSlug": self.published_app.slug,
"eventType": "issue.assigned",
"responseCode": 500,
"project_id": 1,
"date": str(now) + "+00:00",
"error_id": "abc123",
"request_body": json.dumps(self.mock_request.body),
"request_headers": {"Content-Type": "application/json"},
"response_body": json.dumps(self.mock_response.content),
"organization": {"name": self.org.name, "id": self.org.id, "slug": self.org.slug},
}
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 2
@with_feature("organizations:sentry-app-webhook-requests")
def test_linked_error_not_returned_if_project_does_not_exist(self) -> None:
self.login_as(user=self.user)
self.store_event(
data={"event_id": self.event_id, "timestamp": before_now(minutes=1).isoformat()},
project_id=self.project.id,
)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.unpublished_app.webhook_url,
error_id=self.event_id,
project_id=1000,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["organization"]["slug"] == self.org.slug
assert response.data[0]["sentryAppSlug"] == self.published_app.slug
assert "errorUrl" not in response.data[0]
@with_feature("organizations:sentry-app-webhook-requests")
def test_org_slug_filter(self) -> None:
"""Test that filtering by the qparam organizationSlug properly filters results"""
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
made_up_org_response = self.client.get(f"{url}?organizationSlug=madeUpOrg", format="json")
assert made_up_org_response.status_code == 400
assert made_up_org_response.data["detail"] == "Invalid organization."
org_response = self.client.get(f"{url}?organizationSlug={self.org.slug}", format="json")
assert org_response.status_code == 200
assert len(org_response.data) == 2
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 2
@with_feature("organizations:sentry-app-webhook-requests")
def test_date_filter(self) -> None:
"""Test that filtering by the qparams start and end properly filters results"""
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
now = datetime.now() - timedelta(hours=1)
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
with freeze_time(now + timedelta(seconds=1)):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
with freeze_time(now + timedelta(seconds=2)):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 3
# test adding a start time
start_date = now.strftime("%Y-%m-%d %H:%M:%S")
start_date_response = self.client.get(f"{url}?start={start_date}", format="json")
assert start_date_response.status_code == 200
assert len(start_date_response.data) == 3
# test adding an end time
end_date = (now + timedelta(seconds=2)).strftime("%Y-%m-%d %H:%M:%S")
end_date_response = self.client.get(f"{url}?end={end_date}", format="json")
assert end_date_response.status_code == 200
assert len(end_date_response.data) == 2
# test adding a start and end time
new_start_date = (now + timedelta(seconds=1)).strftime("%Y-%m-%d %H:%M:%S")
new_end_date = (now + timedelta(seconds=2)).strftime("%Y-%m-%d %H:%M:%S")
start_end_date_response = self.client.get(
f"{url}?start={new_start_date}&end={new_end_date}", format="json"
)
assert start_end_date_response.status_code == 200
assert len(start_end_date_response.data) == 2
# test adding an improperly formatted end time
bad_date_format_response = self.client.get(f"{url}?end=2000-01- 00:00:00", format="json")
assert bad_date_format_response.status_code == 400
# test adding a start and end time
late_start_date = (now + timedelta(seconds=2)).strftime("%Y-%m-%d %H:%M:%S")
early_end_date = (now + timedelta(seconds=1)).strftime("%Y-%m-%d %H:%M:%S")
start_after_end_response = self.client.get(
f"{url}?start={late_start_date}&end={early_end_date}", format="json"
)
assert start_after_end_response.status_code == 400
@with_feature("organizations:sentry-app-webhook-requests")
def test_get_includes_installation_requests(self) -> None:
self.login_as(user=self.user)
buffer = SentryAppWebhookRequestsBuffer(self.published_app)
now = datetime.now() - timedelta(hours=1)
with freeze_time(now):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.created",
url=self.published_app.webhook_url,
)
with freeze_time(now + timedelta(seconds=1)):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="installation.created",
url=self.published_app.webhook_url,
)
with freeze_time(now + timedelta(seconds=2)):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="issue.assigned",
url=self.published_app.webhook_url,
)
with freeze_time(now + timedelta(seconds=3)):
buffer.add_request(
response_code=200,
org_id=self.org.id,
event="installation.deleted",
url=self.published_app.webhook_url,
)
url = reverse("sentry-api-0-sentry-app-webhook-requests", args=[self.published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert len(response.data) == 4
assert response.data[0]["eventType"] == "installation.deleted"
assert response.data[1]["eventType"] == "issue.assigned"
assert response.data[2]["eventType"] == "installation.created"
assert response.data[3]["eventType"] == "issue.created"
| SentryAppWebhookRequestsGetTest |
python | kamyu104__LeetCode-Solutions | Python/concatenate-non-zero-digits-and-multiply-by-sum-i.py | {
"start": 39,
"end": 502
} | class ____(object):
def sumAndMultiply(self, n):
"""
:type n: int
:rtype: int
"""
def reverse(n):
result = 0
while n:
n, r = divmod(n, 10)
result = result*10+r
return result
total = x = 0
while n:
n, r = divmod(n, 10)
total += r
if r:
x = x*10+r
return reverse(x)*total
| Solution |
python | h5py__h5py | h5py/tests/test_dtype.py | {
"start": 13115,
"end": 14487
} | class ____(TestCase):
datetime_units = [
# Dates
'Y', 'M', 'D',
# Times
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as',
]
def test_datetime(self):
fname = self.mktemp()
for dt_unit in self.datetime_units:
for dt_order in ['<', '>']:
dt_descr = f'{dt_order}M8[{dt_unit}]'
dt = h5py.opaque_dtype(np.dtype(dt_descr))
arr = np.array([0], dtype=np.int64).view(dtype=dt)
with h5py.File(fname, 'w') as f:
dset = f.create_dataset("default", data=arr, dtype=dt)
self.assertArrayEqual(arr, dset)
self.assertEqual(arr.dtype, dset.dtype)
def test_timedelta(self):
fname = self.mktemp()
for dt_unit in self.datetime_units:
for dt_order in ['<', '>']:
dt_descr = f'{dt_order}m8[{dt_unit}]'
dt = h5py.opaque_dtype(np.dtype(dt_descr))
arr = np.array([np.timedelta64(500, dt_unit)], dtype=dt)
with h5py.File(fname, 'w') as f:
dset = f.create_dataset("default", data=arr, dtype=dt)
self.assertArrayEqual(arr, dset)
self.assertEqual(arr.dtype, dset.dtype)
@ut.skipUnless(tables is not None, 'tables is required')
| TestDateTime |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/ParameterTree.py | {
"start": 184,
"end": 9782
} | class ____(TreeWidget):
"""Widget used to display or control data from a hierarchy of Parameters"""
def __init__(self, parent=None, showHeader=True):
"""
============== ========================================================
**Arguments:**
parent (QWidget) An optional parent widget
showHeader (bool) If True, then the QTreeView header is displayed.
============== ========================================================
"""
TreeWidget.__init__(self, parent)
self.setVerticalScrollMode(self.ScrollMode.ScrollPerPixel)
self.setHorizontalScrollMode(self.ScrollMode.ScrollPerPixel)
self.setAnimated(False)
self.setColumnCount(2)
self.setHeaderLabels(["Parameter", "Value"])
self.paramSet = None
self.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeMode.ResizeToContents)
self.setHeaderHidden(not showHeader)
self.itemChanged.connect(self.itemChangedEvent)
self.itemExpanded.connect(self.itemExpandedEvent)
self.itemCollapsed.connect(self.itemCollapsedEvent)
self.lastSel = None
self.setRootIsDecorated(False)
self.setAlternatingRowColors(True)
self._updatePalette(self.palette())
def setParameters(self, param, showTop=True):
"""
Set the top-level :class:`Parameter <pyqtgraph.parametertree.Parameter>`
to be displayed in this ParameterTree.
If *showTop* is False, then the top-level parameter is hidden and only
its children will be visible. This is a convenience method equivalent
to::
tree.clear()
tree.addParameters(param, showTop)
"""
self.clear()
self.addParameters(param, showTop=showTop)
def addParameters(self, param, root=None, depth=0, showTop=True):
"""
Adds one top-level :class:`Parameter <pyqtgraph.parametertree.Parameter>`
to the view.
============== ==========================================================
**Arguments:**
param The :class:`Parameter <pyqtgraph.parametertree.Parameter>`
to add.
root The item within the tree to which *param* should be added.
By default, *param* is added as a top-level item.
showTop If False, then *param* will be hidden, and only its
children will be visible in the tree.
============== ==========================================================
"""
item = param.makeTreeItem(depth=depth)
if root is None:
root = self.invisibleRootItem()
## Hide top-level item
if not showTop:
item.setText(0, '')
item.setSizeHint(0, QtCore.QSize(1,1))
item.setSizeHint(1, QtCore.QSize(1,1))
depth -= 1
root.addChild(item)
item.treeWidgetChanged()
for ch in param:
self.addParameters(ch, root=item, depth=depth+1)
def clear(self):
"""
Remove all parameters from the tree.
"""
self.invisibleRootItem().takeChildren()
def focusNext(self, item, forward=True):
"""Give input focus to the next (or previous) item after *item*
"""
while True:
parent = item.parent()
if parent is None:
return
nextItem = self.nextFocusableChild(parent, item, forward=forward)
if nextItem is not None:
nextItem.setFocus()
self.setCurrentItem(nextItem)
return
item = parent
def focusPrevious(self, item):
self.focusNext(item, forward=False)
def nextFocusableChild(self, root, startItem=None, forward=True):
if startItem is None:
if forward:
index = 0
else:
index = root.childCount()-1
else:
if forward:
index = root.indexOfChild(startItem) + 1
else:
index = root.indexOfChild(startItem) - 1
if forward:
inds = list(range(index, root.childCount()))
else:
inds = list(range(index, -1, -1))
for i in inds:
item = root.child(i)
if hasattr(item, 'isFocusable') and item.isFocusable():
return item
else:
item = self.nextFocusableChild(item, forward=forward)
if item is not None:
return item
return None
def contextMenuEvent(self, ev):
item = self.currentItem()
if hasattr(item, 'contextMenuEvent'):
item.contextMenuEvent(ev)
def _updatePalette(self, palette: QtGui.QPalette) -> None:
app = mkQApp()
# Docs say to use the following methods
# QApplication.instance().styleHints().colorScheme() == QtCore.Qt.ColorScheme.Dark
# but on macOS with Qt 6.7 this is giving opposite results (says color sceme is light
# when it is dark and vice versa). This was not observed in the ExampleApp, but was
# observed with the ParameterTree. We fall back to the "legacy" method of determining
# if the color theme is dark or light from QPalette
windowTextLightness = palette.color(QtGui.QPalette.ColorRole.WindowText).lightness()
windowLightness = palette.color(QtGui.QPalette.ColorRole.Window).lightness()
darkMode = windowTextLightness > windowLightness
app.setProperty('darkMode', darkMode)
for group in [
QtGui.QPalette.ColorGroup.Disabled,
QtGui.QPalette.ColorGroup.Active,
QtGui.QPalette.ColorGroup.Inactive
]:
baseColor = palette.color(
group,
QtGui.QPalette.ColorRole.Base
)
if app.property("darkMode"):
alternateColor = baseColor.lighter(180)
else:
alternateColor = baseColor.darker(110)
# apparently colors are transparent here by default!
alternateColor.setAlpha(255)
palette.setColor(
group,
QtGui.QPalette.ColorRole.AlternateBase,
alternateColor
)
self.setPalette(palette)
return None
def event(self, event: QtCore.QEvent) -> bool:
if event.type() == QtCore.QEvent.Type.FontChange:
for item in self.listAllItems():
if isinstance(item, GroupParameterItem):
item.updateDepth(item.depth)
elif event.type() == QtCore.QEvent.Type.ApplicationPaletteChange:
app = mkQApp()
self._updatePalette(app.palette())
elif event.type() == QtCore.QEvent.Type.PaletteChange:
# For Windows to effectively change all the rows we
# need to catch QEvent.Type.PaletteChange event as well
self._updatePalette(self.palette())
return super().event(event)
@QtCore.Slot(QtWidgets.QTreeWidgetItem, int)
def itemChangedEvent(self, item, col):
if hasattr(item, 'columnChangedEvent'):
item.columnChangedEvent(col)
@QtCore.Slot(QtWidgets.QTreeWidgetItem)
@QtCore.Slot(QtWidgets.QTreeWidgetItem, int)
def itemExpandedEvent(self, item):
if hasattr(item, 'expandedChangedEvent'):
item.expandedChangedEvent(True)
@QtCore.Slot(QtWidgets.QTreeWidgetItem)
@QtCore.Slot(QtWidgets.QTreeWidgetItem, int)
def itemCollapsedEvent(self, item):
if hasattr(item, 'expandedChangedEvent'):
item.expandedChangedEvent(False)
def selectionChanged(self, *args):
sel = self.selectedItems()
if len(sel) != 1:
sel = None
if self.lastSel is not None and isinstance(self.lastSel, ParameterItem):
self.lastSel.selected(False)
if sel is None:
self.lastSel = None
return
self.lastSel = sel[0]
if hasattr(sel[0], 'selected'):
sel[0].selected(True)
return super().selectionChanged(*args)
# commented out due to being unreliable
# def wheelEvent(self, ev):
# self.clearSelection()
# return super().wheelEvent(ev)
def sizeHint(self):
w, h = 0, 0
ind = self.indentation()
for x in self.listAllItems():
if x.isHidden():
continue
try:
depth = x.depth
except AttributeError:
depth = 0
s0 = x.sizeHint(0)
s1 = x.sizeHint(1)
w = max(w, depth * ind + max(0, s0.width()) + max(0, s1.width()))
h += max(0, s0.height(), s1.height())
# typ = x.param.opts['type'] if isinstance(x, ParameterItem) else x
# print(typ, depth * ind, (s0.width(), s0.height()), (s1.width(), s1.height()), (w, h))
# todo: find out if this alternative can be made to work (currently fails when color or colormap are present)
# print('custom', (w, h))
# w = self.sizeHintForColumn(0) + self.sizeHintForColumn(1)
# h = self.viewportSizeHint().height()
# print('alternative', (w, h))
if not self.header().isHidden():
h += self.header().height()
return QtCore.QSize(w, h)
| ParameterTree |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/q_learning.py | {
"start": 1325,
"end": 2489
} | class ____:
def __init__(self, env, n_components=500):
observation_examples = np.array([env.observation_space.sample() for x in range(10000)])
scaler = StandardScaler()
scaler.fit(observation_examples)
# Used to converte a state to a featurizes represenation.
# We use RBF kernels with different variances to cover different parts of the space
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=0.5, n_components=n_components))
])
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observations):
# print "observations:", observations
scaled = self.scaler.transform(observations)
# assert(len(scaled.shape) == 2)
return self.featurizer.transform(scaled)
# Holds one SGDRegressor for each action
| FeatureTransformer |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 69068,
"end": 69433
} | class ____(TestCase):
@staticmethod
def application(env, start_response):
raise AssertionError('should not get there')
def test(self):
longline = 'x' * 20000
with self.makefile() as fd:
fd.write(('''GET /%s HTTP/1.0\r\nHello: world\r\n\r\n''' % longline).encode('latin-1'))
read_http(fd, code=414)
| Test414 |
python | ray-project__ray | python/ray/llm/_internal/batch/benchmark/dataset.py | {
"start": 236,
"end": 1649
} | class ____(ABC):
DEFAULT_RANDOM_SEED = 0
def __init__(
self,
dataset_path: Optional[str] = None,
random_seed: int = DEFAULT_RANDOM_SEED,
) -> None:
"""
Abstract base class for benchmark datasets.
All benchmark datasets should inherit from this class and implement
the required abstract methods.
Args:
dataset_path: The path to the dataset on disk.
random_seed: The seed for the random number generator.
"""
self._dataset_path = dataset_path
self._random_seed = random_seed
@abstractmethod
def load_data(self) -> None:
"""
Load data from the dataset source into memory.
Raises:
NotImplementedError: If the method is not implemented in subclasses.
"""
raise NotImplementedError("load_data must be implemented in subclasses.")
@abstractmethod
def sample(self, num_requests: int) -> List[Dict]:
"""
Sample prompts from the loaded dataset.
Args:
num_requests: The number of prompts to sample from the dataset.
Returns:
A list of sampled request dictionaries.
Raises:
NotImplementedError: If the method is not implemented in subclasses.
"""
raise NotImplementedError("sample must be implemented in subclasses.")
| BenchmarkDataset |
python | getsentry__sentry | src/sentry/users/api/serializers/user_identity_config.py | {
"start": 4137,
"end": 4512
} | class ____(TypedDict):
id: str
category: str
provider: UserIdentityProviderSerializerResponse
name: str
status: str
isLogin: bool
organization: ControlSiloOrganizationSerializerResponse
dateAdded: datetime | None
dateVerified: datetime | None
dateSynced: datetime | None
@register(UserIdentityConfig)
| UserIdentityConfigSerializerResponse |
python | ray-project__ray | release/train_tests/benchmark/config.py | {
"start": 1565,
"end": 4707
} | class ____(BaseModel):
# ScalingConfig
num_workers: int = 1
# Run CPU training where train workers request a `MOCK_GPU` resource instead.
mock_gpu: bool = False
# FailureConfig
max_failures: int = 0
task: str = "image_classification"
task_config: TaskConfig = Field(
default_factory=lambda: TaskConfig(),
)
# Data
dataloader_type: DataloaderType = DataloaderType.RAY_DATA
dataloader_config: DataLoaderConfig = Field(
default_factory=lambda: DataLoaderConfig(),
)
# Training
num_epochs: int = 1
skip_train_step: bool = False
# Checkpointing
checkpoint_every_n_steps: int = -1
# Validation
validate_every_n_steps: int = -1
skip_validation_step: bool = False
skip_validation_at_epoch_end: bool = False
# Logging
log_metrics_every_n_steps: int = 512
def _is_pydantic_model(field_type) -> bool:
"""Check if a type is a subclass of Pydantic's BaseModel."""
return isinstance(field_type, type) and issubclass(field_type, BaseModel)
def _add_field_to_parser(parser: argparse.ArgumentParser, field: str, field_info):
field_type = field_info.annotation
if field_type is bool:
parser.add_argument(
f"--{field}",
action="store_true",
help=f"Enable {field} (default: {field_info.default})",
)
else:
parser.add_argument(f"--{field}", type=field_type, default=field_info.default)
def cli_to_config(benchmark_config_cls=BenchmarkConfig) -> BenchmarkConfig:
parser = argparse.ArgumentParser()
nested_fields = []
for field, field_info in benchmark_config_cls.model_fields.items():
# Skip nested configs for now
if _is_pydantic_model(field_info.annotation):
nested_fields.append(field)
continue
_add_field_to_parser(parser, field, field_info)
top_level_args, _ = parser.parse_known_args()
# Handle nested configs that depend on top-level args
nested_configs = {}
for nested_field in nested_fields:
nested_parser = argparse.ArgumentParser()
nested_config_cls = benchmark_config_cls.model_fields[nested_field].annotation
if nested_config_cls == DataLoaderConfig:
if top_level_args.dataloader_type == DataloaderType.RAY_DATA:
nested_config_cls = RayDataConfig
elif top_level_args.dataloader_type == DataloaderType.TORCH:
nested_config_cls = TorchConfig
if nested_config_cls == TaskConfig:
if top_level_args.task == ImageClassificationConfig.TASK_NAME:
nested_config_cls = ImageClassificationConfig
elif top_level_args.task == RecsysConfig.TASK_NAME:
nested_config_cls = RecsysConfig
for field, field_info in nested_config_cls.model_fields.items():
_add_field_to_parser(nested_parser, field, field_info)
args, _ = nested_parser.parse_known_args()
nested_configs[nested_field] = nested_config_cls(**vars(args))
return benchmark_config_cls(**vars(top_level_args), **nested_configs)
| BenchmarkConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/descriptor3.py | {
"start": 523,
"end": 579
} | class ____:
not_working: Desc1[int] = func1(list)
| ClassA |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/schema.py | {
"start": 9892,
"end": 13917
} | class ____(_EvalArgsMixin, ABC):
"""Compare the output of two models (or two outputs of the same model)."""
@abstractmethod
def _evaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: str | None = None,
input: str | None = None, # noqa: A002
**kwargs: Any,
) -> dict:
"""Evaluate the output string pairs.
Args:
prediction: The output string from the first model.
prediction_b: The output string from the second model.
reference: The expected output / reference string.
input: The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
`dict` containing the preference, scores, and/or other information.
""" # noqa: E501
async def _aevaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: str | None = None,
input: str | None = None, # noqa: A002
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate the output string pairs.
Args:
prediction: The output string from the first model.
prediction_b: The output string from the second model.
reference: The expected output / reference string.
input: The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
`dict` containing the preference, scores, and/or other information.
""" # noqa: E501
return await run_in_executor(
None,
self._evaluate_string_pairs,
prediction=prediction,
prediction_b=prediction_b,
reference=reference,
input=input,
**kwargs,
)
def evaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: str | None = None,
input: str | None = None, # noqa: A002
**kwargs: Any,
) -> dict:
"""Evaluate the output string pairs.
Args:
prediction: The output string from the first model.
prediction_b: The output string from the second model.
reference: The expected output / reference string.
input: The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
`dict` containing the preference, scores, and/or other information.
""" # noqa: E501
self._check_evaluation_args(reference=reference, input_=input)
return self._evaluate_string_pairs(
prediction=prediction,
prediction_b=prediction_b,
reference=reference,
input=input,
**kwargs,
)
async def aevaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: str | None = None,
input: str | None = None, # noqa: A002
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate the output string pairs.
Args:
prediction: The output string from the first model.
prediction_b: The output string from the second model.
reference: The expected output / reference string.
input: The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
`dict` containing the preference, scores, and/or other information.
""" # noqa: E501
self._check_evaluation_args(reference=reference, input_=input)
return await self._aevaluate_string_pairs(
prediction=prediction,
prediction_b=prediction_b,
reference=reference,
input=input,
**kwargs,
)
| PairwiseStringEvaluator |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/llms/types.py | {
"start": 16467,
"end": 20318
} | class ____(BaseModel):
"""Chat message."""
role: MessageRole = MessageRole.USER
additional_kwargs: dict[str, Any] = Field(default_factory=dict)
blocks: list[ContentBlock] = Field(default_factory=list)
def __init__(self, /, content: Any | None = None, **data: Any) -> None:
"""
Keeps backward compatibility with the old `content` field.
If content was passed and contained text, store a single TextBlock.
If content was passed and it was a list, assume it's a list of content blocks and store it.
"""
if content is not None:
if isinstance(content, str):
data["blocks"] = [TextBlock(text=content)]
elif isinstance(content, list):
data["blocks"] = content
super().__init__(**data)
@model_validator(mode="after")
def legacy_additional_kwargs_image(self) -> Self:
"""
Provided for backward compatibility.
If `additional_kwargs` contains an `images` key, assume the value is a list
of ImageDocument and convert them into image blocks.
"""
if documents := self.additional_kwargs.get("images"):
documents = cast(list[ImageDocument], documents)
for doc in documents:
img_base64_bytes = doc.resolve_image(as_base64=True).read()
self.blocks.append(ImageBlock(image=img_base64_bytes))
return self
@property
def content(self) -> str | None:
"""
Keeps backward compatibility with the old `content` field.
Returns:
The cumulative content of the TextBlock blocks, None if there are none.
"""
content_strs = []
for block in self.blocks:
if isinstance(block, TextBlock):
content_strs.append(block.text)
ct = "\n".join(content_strs) or None
if ct is None and len(content_strs) == 1:
return ""
return ct
@content.setter
def content(self, content: str) -> None:
"""
Keeps backward compatibility with the old `content` field.
Raises:
ValueError: if blocks contains more than a block, or a block that's not TextBlock.
"""
if not self.blocks:
self.blocks = [TextBlock(text=content)]
elif len(self.blocks) == 1 and isinstance(self.blocks[0], TextBlock):
self.blocks = [TextBlock(text=content)]
else:
raise ValueError(
"ChatMessage contains multiple blocks, use 'ChatMessage.blocks' instead."
)
def __str__(self) -> str:
return f"{self.role.value}: {self.content}"
@classmethod
def from_str(
cls,
content: str,
role: Union[MessageRole, str] = MessageRole.USER,
**kwargs: Any,
) -> Self:
if isinstance(role, str):
role = MessageRole(role)
return cls(role=role, blocks=[TextBlock(text=content)], **kwargs)
def _recursive_serialization(self, value: Any) -> Any:
if isinstance(value, BaseModel):
value.model_rebuild() # ensures all fields are initialized and serializable
return value.model_dump() # type: ignore
if isinstance(value, dict):
return {
key: self._recursive_serialization(value)
for key, value in value.items()
}
if isinstance(value, list):
return [self._recursive_serialization(item) for item in value]
if isinstance(value, bytes):
return base64.b64encode(value).decode("utf-8")
return value
@field_serializer("additional_kwargs", check_fields=False)
def serialize_additional_kwargs(self, value: Any, _info: Any) -> Any:
return self._recursive_serialization(value)
| ChatMessage |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/dataclass_transforms_decorator_w_mixins.py | {
"start": 303,
"end": 377
} | class ____:
pass
@unmapped_dataclass(init=False, kw_only=True)
| DataModel |
python | doocs__leetcode | solution/3200-3299/3294.Convert Doubly Linked List to Array II/Solution.py | {
"start": 171,
"end": 419
} | class ____:
def toArray(self, node: "Optional[Node]") -> List[int]:
while node.prev:
node = node.prev
ans = []
while node:
ans.append(node.val)
node = node.next
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/api/v3/mixins.py | {
"start": 7041,
"end": 7505
} | class ____(NestedParentObjectMixin):
"""
Mixin to define queryset permissions for ViewSet only in one place.
All APIv3 user' ViewSet should inherit this mixin, unless specific permissions
required. In that case, a specific mixin for that case should be defined.
"""
def has_admin_permission(self, requesting_user, accessing_user):
if requesting_user == accessing_user:
return True
return False
| UserQuerySetMixin |
python | fastai__fastai | fastai/callback/wandb.py | {
"start": 640,
"end": 15040
} | class ____(Callback):
"Saves model topology, losses & metrics"
remove_on_fetch,order = True,Recorder.order+1
# Record if watch has been called previously (even in another instance)
_wandb_watch_called = False
def __init__(self,
log:str=None, # What to log (can be `gradients`, `parameters`, `all` or None)
log_preds:bool=True, # Whether to log model predictions on a `wandb.Table`
log_preds_every_epoch:bool=False, # Whether to log predictions every epoch or at the end
log_model:bool=False, # Whether to save the model checkpoint to a `wandb.Artifact`
model_name:str=None, # The name of the `model_name` to save, overrides `SaveModelCallback`
log_dataset:bool=False, # Whether to log the dataset to a `wandb.Artifact`
dataset_name:str=None, # A name to log the dataset with
valid_dl:TfmdDL=None, # If `log_preds=True`, then the samples will be drawn from `valid_dl`
n_preds:int=36, # How many samples to log predictions
seed:int=12345, # The seed of the samples drawn
reorder=True):
store_attr()
def after_create(self):
# log model
if self.log_model:
if not hasattr(self, 'save_model'):
# does not have the SaveModelCallback
self.learn.add_cb(SaveModelCallback(fname=ifnone(self.model_name, 'model')))
else:
# override SaveModelCallback
if self.model_name is not None:
self.save_model.fname = self.model_name
def before_fit(self):
"Call watch method to log model topology, gradients & weights"
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError('You must call wandb.init() before WandbCallback()')
# W&B log step
self._wandb_step = wandb.run.step - 1 # -1 except if the run has previously logged data (incremented at each batch)
self._wandb_epoch = 0 if not(wandb.run.step) else math.ceil(wandb.run.summary['epoch']) # continue to next epoch
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
# Log config parameters
log_config = self.learn.gather_args()
_format_config(log_config)
try:
wandb.config.update(log_config, allow_val_change=True)
except Exception as e:
print(f'WandbCallback could not log config parameters -> {e}')
if not WandbCallback._wandb_watch_called:
WandbCallback._wandb_watch_called = True
# Logs model topology and optionally gradients and weights
if self.log is not None:
wandb.watch(self.learn.model, log=self.log)
# log dataset
assert isinstance(self.log_dataset, (str, Path, bool)), 'log_dataset must be a path or a boolean'
if self.log_dataset is True:
if Path(self.dls.path) == Path('.'):
print('WandbCallback could not retrieve the dataset path, please provide it explicitly to "log_dataset"')
self.log_dataset = False
else:
self.log_dataset = self.dls.path
if self.log_dataset:
self.log_dataset = Path(self.log_dataset)
assert self.log_dataset.is_dir(), f'log_dataset must be a valid directory: {self.log_dataset}'
metadata = {'path relative to learner': os.path.relpath(self.log_dataset, self.learn.path)}
log_dataset(path=self.log_dataset, name=self.dataset_name, metadata=metadata)
if self.log_preds:
try:
if not self.valid_dl:
#Initializes the batch watched
wandbRandom = random.Random(self.seed) # For repeatability
self.n_preds = min(self.n_preds, len(self.dls.valid_ds))
idxs = wandbRandom.sample(range(len(self.dls.valid_ds)), self.n_preds)
if isinstance(self.dls, TabularDataLoaders):
test_items = getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True, process=False)
else:
test_items = [getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[i] for i in idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True)
self.learn.add_cb(FetchPredsCallback(dl=self.valid_dl, with_input=True, with_decoded=True, reorder=self.reorder))
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to prepare a DataLoader for logging prediction samples -> {e}')
def before_batch(self):
self.ti_batch = time.perf_counter()
def after_batch(self):
"Log hyper-parameters and training loss"
if self.training:
batch_time = time.perf_counter() - self.ti_batch
self._wandb_step += 1
self._wandb_epoch += 1/self.n_iter
hypers = {f'{k}_{i}':v for i,h in enumerate(self.opt.hypers) for k,v in h.items()}
wandb.log({'epoch': self._wandb_epoch, 'train_loss': self.smooth_loss, 'raw_loss': self.loss, **hypers}, step=self._wandb_step)
wandb.log({'train_samples_per_sec': len(self.xb[0]) / batch_time}, step=self._wandb_step)
def log_predictions(self):
try:
inp,preds,targs,out = self.learn.fetch_preds.preds
b = tuplify(inp) + tuplify(targs)
x,y,its,outs = self.valid_dl.show_results(b, out, show=False, max_n=self.n_preds)
wandb.log(wandb_process(x, y, its, outs, preds), step=self._wandb_step)
except Exception as e:
self.log_preds = False
self.remove_cb(FetchPredsCallback)
print(f'WandbCallback was not able to get prediction samples -> {e}')
def after_epoch(self):
"Log validation loss and custom metrics & log prediction samples"
# Correct any epoch rounding error and overwrite value
self._wandb_epoch = round(self._wandb_epoch)
if self.log_preds and self.log_preds_every_epoch:
self.log_predictions()
wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
wandb.log({n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}, step=self._wandb_step)
def after_fit(self):
if self.log_preds and not self.log_preds_every_epoch:
self.log_predictions()
if self.log_model:
if self.save_model.last_saved_path is None:
print('WandbCallback could not retrieve a model to upload')
else:
metadata = {n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}
log_model(self.save_model.last_saved_path, name=self.save_model.fname, metadata=metadata)
self.run = True
if self.log_preds: self.remove_cb(FetchPredsCallback)
wandb.log({}) # ensure sync of last step
self._wandb_step += 1
# %% ../../nbs/70_callback.wandb.ipynb 11
@patch
def gather_args(self:Learner):
"Gather config parameters accessible to the learner"
# args stored by `store_attr`
cb_args = {f'{cb}':getattr(cb,'__stored_args__',True) for cb in self.cbs}
args = {'Learner':self, **cb_args}
# input dimensions
try:
n_inp = self.dls.train.n_inp
args['n_inp'] = n_inp
xb = self.dls.valid.one_batch()[:n_inp]
args.update({f'input {n+1} dim {i+1}':d for n in range(n_inp) for i,d in enumerate(list(detuplify(xb[n]).shape))})
except: print(f'Could not gather input dimensions')
# other useful information
with ignore_exceptions():
args['batch size'] = self.dls.bs
args['batch per epoch'] = len(self.dls.train)
args['model parameters'] = total_params(self.model)[0]
args['device'] = self.dls.device.type
args['frozen'] = bool(self.opt.frozen_idx)
args['frozen idx'] = self.opt.frozen_idx
args['dataset.tfms'] = f'{self.dls.dataset.tfms}'
args['dls.after_item'] = f'{self.dls.after_item}'
args['dls.before_batch'] = f'{self.dls.before_batch}'
args['dls.after_batch'] = f'{self.dls.after_batch}'
return args
# %% ../../nbs/70_callback.wandb.ipynb 13
def _make_plt(img):
"Make plot to image resolution"
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = img.shape[:2]
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
return fig, ax
# %% ../../nbs/70_callback.wandb.ipynb 14
def _format_config_value(v):
if isinstance(v, list):
return [_format_config_value(item) for item in v]
elif hasattr(v, '__stored_args__'):
return {**_format_config(v.__stored_args__), '_name': v}
return v
# %% ../../nbs/70_callback.wandb.ipynb 15
def _format_config(config):
"Format config parameters before logging them"
for k,v in config.items():
if isinstance(v, dict):
config[k] = _format_config(v)
else:
config[k] = _format_config_value(v)
return config
# %% ../../nbs/70_callback.wandb.ipynb 16
def _format_metadata(metadata):
"Format metadata associated to artifacts"
for k,v in metadata.items(): metadata[k] = str(v)
# %% ../../nbs/70_callback.wandb.ipynb 17
def log_dataset(path, name=None, metadata={}, description='raw dataset'):
"Log dataset folder"
# Check if wandb.init has been called in case datasets are logged manually
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_dataset()')
path = Path(path)
if not path.is_dir():
raise f'path must be a valid directory: {path}'
name = ifnone(name, path.name)
_format_metadata(metadata)
artifact_dataset = wandb.Artifact(name=name, type='dataset', metadata=metadata, description=description)
# log everything except "models" folder
for p in path.ls():
if p.is_dir():
if p.name != 'models': artifact_dataset.add_dir(str(p.resolve()), name=p.name)
else: artifact_dataset.add_file(str(p.resolve()))
wandb.run.use_artifact(artifact_dataset)
# %% ../../nbs/70_callback.wandb.ipynb 19
def log_model(path, name=None, metadata={}, description='trained model'):
"Log model file"
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_model()')
path = Path(path)
if not path.is_file():
raise f'path must be a valid file: {path}'
name = ifnone(name, f'run-{wandb.run.id}-model')
_format_metadata(metadata)
artifact_model = wandb.Artifact(name=name, type='model', metadata=metadata, description=description)
with artifact_model.new_file(str(Path(name).with_suffix(".pth")), mode='wb') as fa:
fa.write(path.read_bytes())
wandb.run.log_artifact(artifact_model)
# %% ../../nbs/70_callback.wandb.ipynb 21
@dispatch
def wandb_process(x:TensorImage, y, samples, outs, preds):
"Process `sample` and `out` depending on the type of `x/y`"
res_input, res_pred, res_label = [],[],[]
for s,o in zip(samples, outs):
img = s[0].permute(1,2,0)
res_input.append(wandb.Image(img, caption='Input_data'))
for t, capt, res in ((o[0], "Prediction", res_pred), (s[1], "Ground_Truth", res_label)):
fig, ax = _make_plt(img)
# Superimpose label or prediction to input image
ax = img.show(ctx=ax)
ax = t.show(ctx=ax)
res.append(wandb.Image(fig, caption=capt))
plt.close(fig)
return {"Inputs":res_input, "Predictions":res_pred, "Ground_Truth":res_label}
# %% ../../nbs/70_callback.wandb.ipynb 22
def _unlist(l):
"get element of lists of lenght 1"
if isinstance(l, (list, tuple)):
if len(l) == 1: return l[0]
else: return l
# %% ../../nbs/70_callback.wandb.ipynb 23
@dispatch
def wandb_process(x:TensorImage, y:TensorCategory|TensorMultiCategory, samples, outs, preds):
table = wandb.Table(columns=["Input image", "Ground_Truth", "Predictions"])
for (image, label), pred_label in zip(samples,outs):
table.add_data(wandb.Image(image.permute(1,2,0)), label, _unlist(pred_label))
return {"Prediction_Samples": table}
# %% ../../nbs/70_callback.wandb.ipynb 24
@dispatch
def wandb_process(x:TensorImage, y:TensorMask, samples, outs, preds):
res = []
codes = getattr(outs[0][0], 'codes', None)
if codes is not None:
class_labels = [{'name': name, 'id': id} for id, name in enumerate(codes)]
else:
class_labels = [{'name': i, 'id': i} for i in range(preds.shape[1])]
table = wandb.Table(columns=["Input Image", "Ground_Truth", "Predictions"])
for (image, label), pred_label in zip(samples, outs):
img = image.permute(1,2,0)
table.add_data(wandb.Image(img),
wandb.Image(img, masks={"Ground_Truth": {'mask_data': label.numpy().astype(np.uint8)}}, classes=class_labels),
wandb.Image(img, masks={"Prediction": {'mask_data': pred_label[0].numpy().astype(np.uint8)}}, classes=class_labels)
)
return {"Prediction_Samples": table}
# %% ../../nbs/70_callback.wandb.ipynb 25
@dispatch
def wandb_process(x:TensorText, y:TensorCategory|TensorMultiCategory, samples, outs, preds):
data = [[s[0], s[1], o[0]] for s,o in zip(samples,outs)]
return {"Prediction_Samples": wandb.Table(data=data, columns=["Text", "Target", "Prediction"])}
# %% ../../nbs/70_callback.wandb.ipynb 26
@dispatch
def wandb_process(x:Tabular, y:Tabular, samples, outs, preds):
df = x.all_cols
for n in x.y_names: df[n+'_pred'] = y[n].values
return {"Prediction_Samples": wandb.Table(dataframe=df)}
# %% ../../nbs/70_callback.wandb.ipynb 31
_all_ = ['wandb_process']
| WandbCallback |
python | kubernetes-client__python | kubernetes/client/models/v1_object_field_selector.py | {
"start": 383,
"end": 4735
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'field_path': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'field_path': 'fieldPath'
}
def __init__(self, api_version=None, field_path=None, local_vars_configuration=None): # noqa: E501
"""V1ObjectFieldSelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._field_path = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.field_path = field_path
@property
def api_version(self):
"""Gets the api_version of this V1ObjectFieldSelector. # noqa: E501
Version of the schema the FieldPath is written in terms of, defaults to \"v1\". # noqa: E501
:return: The api_version of this V1ObjectFieldSelector. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ObjectFieldSelector.
Version of the schema the FieldPath is written in terms of, defaults to \"v1\". # noqa: E501
:param api_version: The api_version of this V1ObjectFieldSelector. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def field_path(self):
"""Gets the field_path of this V1ObjectFieldSelector. # noqa: E501
Path of the field to select in the specified API version. # noqa: E501
:return: The field_path of this V1ObjectFieldSelector. # noqa: E501
:rtype: str
"""
return self._field_path
@field_path.setter
def field_path(self, field_path):
"""Sets the field_path of this V1ObjectFieldSelector.
Path of the field to select in the specified API version. # noqa: E501
:param field_path: The field_path of this V1ObjectFieldSelector. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and field_path is None: # noqa: E501
raise ValueError("Invalid value for `field_path`, must not be `None`") # noqa: E501
self._field_path = field_path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ObjectFieldSelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ObjectFieldSelector):
return True
return self.to_dict() != other.to_dict()
| V1ObjectFieldSelector |
python | doocs__leetcode | solution/0500-0599/0508.Most Frequent Subtree Sum/Solution.py | {
"start": 192,
"end": 643
} | class ____:
def findFrequentTreeSum(self, root: Optional[TreeNode]) -> List[int]:
def dfs(root: Optional[TreeNode]) -> int:
if root is None:
return 0
l, r = dfs(root.left), dfs(root.right)
s = l + r + root.val
cnt[s] += 1
return s
cnt = Counter()
dfs(root)
mx = max(cnt.values())
return [k for k, v in cnt.items() if v == mx]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/experimental/bundles/upload.py | {
"start": 240,
"end": 3064
} | class ____(TypedDict):
"""
The output of the `upload_bundle_to_gcs` step.
"""
bucket: str
key: str
def upload_bundle_to_gcs(
local_filepath: Path,
bucket: str,
key: str,
gcp_credentials_block_name: str | None = None,
) -> UploadBundleToGcsOutput:
"""
Uploads a bundle file to a GCS bucket.
Args:
local_filepath: The path to the bundle file to upload.
bucket: The name of the GCS bucket to upload the bundle to.
key: The key (path) to upload the bundle to in the GCS bucket.
gcp_credentials_block_name: The name of the GCP credentials block to use.
Returns:
A dictionary containing the bucket and key of the uploaded bundle.
"""
if not local_filepath.exists():
raise ValueError(f"Bundle file not found: {local_filepath}")
key = key or local_filepath.name
if gcp_credentials_block_name:
logger.debug(
"Loading GCP credentials from block %s", gcp_credentials_block_name
)
gcp_credentials = cast(
prefect_gcp.credentials.GcpCredentials,
prefect_gcp.credentials.GcpCredentials.load(
gcp_credentials_block_name,
_sync=True, # pyright: ignore[reportCallIssue] _sync is needed to prevent incidental async
),
)
else:
logger.debug("Loading default GCP credentials")
gcp_credentials = prefect_gcp.credentials.GcpCredentials()
gcs_client = gcp_credentials.get_cloud_storage_client()
try:
logger.debug(
"Uploading bundle from path %s to GCS bucket %s with key %s",
local_filepath,
bucket,
key,
)
gcs_client.bucket(bucket).blob(key).upload_from_filename(local_filepath) # pyright: ignore[reportUnknownMemberType] Incomplete type hints
except Exception as e:
raise RuntimeError(f"Failed to upload bundle to GCS: {e}")
return {"bucket": bucket, "key": key}
def _cli_wrapper(
local_filepath: Path = typer.Argument(
..., help="The path to the bundle file to upload."
),
bucket: str = typer.Option(
..., help="The name of the GCS bucket to upload the bundle to."
),
key: str = typer.Option(
..., help="The key (path) to upload the bundle to in the GCS bucket."
),
gcp_credentials_block_name: Optional[str] = typer.Option(
None,
help="The name of the GCP credentials block to use for authentication. If not provided, the default credentials will be used.",
),
) -> UploadBundleToGcsOutput:
"""
Uploads a bundle file to a GCS bucket.
"""
return upload_bundle_to_gcs(local_filepath, bucket, key, gcp_credentials_block_name)
if __name__ == "__main__":
typer.run(_cli_wrapper)
| UploadBundleToGcsOutput |
python | MongoEngine__mongoengine | mongoengine/connection.py | {
"start": 17882,
"end": 18536
} | class ____(threading.local):
def __init__(self):
self.sessions = collections.deque()
def append(self, session):
self.sessions.append(session)
def get_current(self):
if len(self.sessions):
return self.sessions[-1]
def clear_current(self):
if len(self.sessions):
self.sessions.pop()
def clear_all(self):
self.sessions.clear()
_local_sessions = _LocalSessions()
def _set_session(session):
_local_sessions.append(session)
def _get_session():
return _local_sessions.get_current()
def _clear_session():
return _local_sessions.clear_current()
| _LocalSessions |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 51005,
"end": 51146
} | class ____(serializers.ModelSerializer):
class Meta:
model = Issue6110TestModel
fields = ('name',)
| Issue6110ModelSerializer |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 6138,
"end": 8176
} | class ____(HTTPException):
"""405 Method Not Allowed
Args:
message (Optional[Union[str, bytes]], optional): The message to be sent to the client. If `None`
then the HTTP status 'Method Not Allowed' will be sent. Defaults to `None`.
method (Optional[str], optional): The HTTP method that was used. Defaults to an empty string.
allowed_methods (Optional[Sequence[str]], optional): The HTTP methods that can be used instead of the
one that was attempted.
quiet (Optional[bool], optional): When `True`, the error traceback will be suppressed
from the logs. Defaults to `None`.
context (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will be
sent to the client upon exception. Defaults to `None`.
extra (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will NOT be
sent to the client when in PRODUCTION mode. Defaults to `None`.
headers (Optional[Dict[str, Any]], optional): Additional headers that should be sent with the HTTP
response. Defaults to `None`.
""" # noqa: E501
status_code = 405
quiet = True
def __init__(
self,
message: Optional[Union[str, bytes]] = None,
method: str = "",
allowed_methods: Optional[Sequence[str]] = None,
*,
quiet: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
extra: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, Any]] = None,
):
super().__init__(
message,
quiet=quiet,
context=context,
extra=extra,
headers=headers,
)
if allowed_methods:
self.headers = {
**self.headers,
"Allow": ", ".join(allowed_methods),
}
self.method = method
self.allowed_methods = allowed_methods
MethodNotSupported = MethodNotAllowed
| MethodNotAllowed |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_activation.py | {
"start": 4854,
"end": 13395
} | class ____:
@staticmethod
def test_class():
assert issubclass(FirstOrderActivationDeGroote2016, ActivationBase)
assert issubclass(FirstOrderActivationDeGroote2016, _NamedMixin)
assert FirstOrderActivationDeGroote2016.__name__ == 'FirstOrderActivationDeGroote2016'
@pytest.fixture(autouse=True)
def _first_order_activation_de_groote_2016_fixture(self):
self.name = 'name'
self.e = dynamicsymbols('e_name')
self.a = dynamicsymbols('a_name')
self.tau_a = Symbol('tau_a')
self.tau_d = Symbol('tau_d')
self.b = Symbol('b')
self.instance = FirstOrderActivationDeGroote2016(
self.name,
self.tau_a,
self.tau_d,
self.b,
)
def test_instance(self):
instance = FirstOrderActivationDeGroote2016(self.name)
assert isinstance(instance, FirstOrderActivationDeGroote2016)
def test_with_defaults(self):
instance = FirstOrderActivationDeGroote2016.with_defaults(self.name)
assert isinstance(instance, FirstOrderActivationDeGroote2016)
assert instance.tau_a == Float('0.015')
assert instance.activation_time_constant == Float('0.015')
assert instance.tau_d == Float('0.060')
assert instance.deactivation_time_constant == Float('0.060')
assert instance.b == Float('10.0')
assert instance.smoothing_rate == Float('10.0')
def test_name(self):
assert hasattr(self.instance, 'name')
assert self.instance.name == self.name
def test_order(self):
assert hasattr(self.instance, 'order')
assert self.instance.order == 1
def test_excitation(self):
assert hasattr(self.instance, 'e')
assert hasattr(self.instance, 'excitation')
e_expected = dynamicsymbols('e_name')
assert self.instance.e == e_expected
assert self.instance.excitation == e_expected
assert self.instance.e is self.instance.excitation
def test_excitation_is_immutable(self):
with pytest.raises(AttributeError):
self.instance.e = None
with pytest.raises(AttributeError):
self.instance.excitation = None
def test_activation(self):
assert hasattr(self.instance, 'a')
assert hasattr(self.instance, 'activation')
a_expected = dynamicsymbols('a_name')
assert self.instance.a == a_expected
assert self.instance.activation == a_expected
def test_activation_is_immutable(self):
with pytest.raises(AttributeError):
self.instance.a = None
with pytest.raises(AttributeError):
self.instance.activation = None
@pytest.mark.parametrize(
'tau_a, expected',
[
(None, Symbol('tau_a_name')),
(Symbol('tau_a'), Symbol('tau_a')),
(Float('0.015'), Float('0.015')),
]
)
def test_activation_time_constant(self, tau_a, expected):
instance = FirstOrderActivationDeGroote2016(
'name', activation_time_constant=tau_a,
)
assert instance.tau_a == expected
assert instance.activation_time_constant == expected
assert instance.tau_a is instance.activation_time_constant
def test_activation_time_constant_is_immutable(self):
with pytest.raises(AttributeError):
self.instance.tau_a = None
with pytest.raises(AttributeError):
self.instance.activation_time_constant = None
@pytest.mark.parametrize(
'tau_d, expected',
[
(None, Symbol('tau_d_name')),
(Symbol('tau_d'), Symbol('tau_d')),
(Float('0.060'), Float('0.060')),
]
)
def test_deactivation_time_constant(self, tau_d, expected):
instance = FirstOrderActivationDeGroote2016(
'name', deactivation_time_constant=tau_d,
)
assert instance.tau_d == expected
assert instance.deactivation_time_constant == expected
assert instance.tau_d is instance.deactivation_time_constant
def test_deactivation_time_constant_is_immutable(self):
with pytest.raises(AttributeError):
self.instance.tau_d = None
with pytest.raises(AttributeError):
self.instance.deactivation_time_constant = None
@pytest.mark.parametrize(
'b, expected',
[
(None, Symbol('b_name')),
(Symbol('b'), Symbol('b')),
(Integer('10'), Integer('10')),
]
)
def test_smoothing_rate(self, b, expected):
instance = FirstOrderActivationDeGroote2016(
'name', smoothing_rate=b,
)
assert instance.b == expected
assert instance.smoothing_rate == expected
assert instance.b is instance.smoothing_rate
def test_smoothing_rate_is_immutable(self):
with pytest.raises(AttributeError):
self.instance.b = None
with pytest.raises(AttributeError):
self.instance.smoothing_rate = None
def test_state_vars(self):
assert hasattr(self.instance, 'x')
assert hasattr(self.instance, 'state_vars')
assert self.instance.x == self.instance.state_vars
x_expected = Matrix([self.a])
assert self.instance.x == x_expected
assert self.instance.state_vars == x_expected
assert isinstance(self.instance.x, Matrix)
assert isinstance(self.instance.state_vars, Matrix)
assert self.instance.x.shape == (1, 1)
assert self.instance.state_vars.shape == (1, 1)
def test_input_vars(self):
assert hasattr(self.instance, 'r')
assert hasattr(self.instance, 'input_vars')
assert self.instance.r == self.instance.input_vars
r_expected = Matrix([self.e])
assert self.instance.r == r_expected
assert self.instance.input_vars == r_expected
assert isinstance(self.instance.r, Matrix)
assert isinstance(self.instance.input_vars, Matrix)
assert self.instance.r.shape == (1, 1)
assert self.instance.input_vars.shape == (1, 1)
def test_constants(self):
assert hasattr(self.instance, 'p')
assert hasattr(self.instance, 'constants')
assert self.instance.p == self.instance.constants
p_expected = Matrix([self.tau_a, self.tau_d, self.b])
assert self.instance.p == p_expected
assert self.instance.constants == p_expected
assert isinstance(self.instance.p, Matrix)
assert isinstance(self.instance.constants, Matrix)
assert self.instance.p.shape == (3, 1)
assert self.instance.constants.shape == (3, 1)
def test_M(self):
assert hasattr(self.instance, 'M')
M_expected = Matrix([1])
assert self.instance.M == M_expected
assert isinstance(self.instance.M, Matrix)
assert self.instance.M.shape == (1, 1)
def test_F(self):
assert hasattr(self.instance, 'F')
da_expr = (
((1/(self.tau_a*(Rational(1, 2) + Rational(3, 2)*self.a)))
*(Rational(1, 2) + Rational(1, 2)*tanh(self.b*(self.e - self.a)))
+ ((Rational(1, 2) + Rational(3, 2)*self.a)/self.tau_d)
*(Rational(1, 2) - Rational(1, 2)*tanh(self.b*(self.e - self.a))))
*(self.e - self.a)
)
F_expected = Matrix([da_expr])
assert self.instance.F == F_expected
assert isinstance(self.instance.F, Matrix)
assert self.instance.F.shape == (1, 1)
def test_rhs(self):
assert hasattr(self.instance, 'rhs')
da_expr = (
((1/(self.tau_a*(Rational(1, 2) + Rational(3, 2)*self.a)))
*(Rational(1, 2) + Rational(1, 2)*tanh(self.b*(self.e - self.a)))
+ ((Rational(1, 2) + Rational(3, 2)*self.a)/self.tau_d)
*(Rational(1, 2) - Rational(1, 2)*tanh(self.b*(self.e - self.a))))
*(self.e - self.a)
)
rhs_expected = Matrix([da_expr])
rhs = self.instance.rhs()
assert rhs == rhs_expected
assert isinstance(rhs, Matrix)
assert rhs.shape == (1, 1)
assert simplify(self.instance.M.solve(self.instance.F) - rhs) == zeros(1)
def test_repr(self):
expected = (
'FirstOrderActivationDeGroote2016(\'name\', '
'activation_time_constant=tau_a, '
'deactivation_time_constant=tau_d, '
'smoothing_rate=b)'
)
assert repr(self.instance) == expected
| TestFirstOrderActivationDeGroote2016 |
python | sympy__sympy | sympy/functions/special/zeta_functions.py | {
"start": 21000,
"end": 21990
} | class ____(DefinedFunction):
r"""
Riemann Xi function.
Examples
========
The Riemann Xi function is closely related to the Riemann zeta function.
The zeros of Riemann Xi function are precisely the non-trivial zeros
of the zeta function.
>>> from sympy import riemann_xi, zeta
>>> from sympy.abc import s
>>> riemann_xi(s).rewrite(zeta)
s*(s - 1)*gamma(s/2)*zeta(s)/(2*pi**(s/2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Riemann_Xi_function
"""
@classmethod
def eval(cls, s):
from sympy.functions.special.gamma_functions import gamma
z = zeta(s)
if s in (S.Zero, S.One):
return S.Half
if not isinstance(z, zeta):
return s*(s - 1)*gamma(s/2)*z/(2*pi**(s/2))
def _eval_rewrite_as_zeta(self, s, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return s*(s - 1)*gamma(s/2)*zeta(s)/(2*pi**(s/2))
| riemann_xi |
python | euske__pdfminer | pdfminer/cmapdb.py | {
"start": 4920,
"end": 6490
} | class ____:
_cmap_cache = {}
_umap_cache = {}
class CMapNotFound(CMapError):
pass
@classmethod
def _load_data(klass, name):
filename = '%s.marshal.gz' % name
logging.info('loading: %r' % name)
cmap_paths = (os.environ.get('CMAP_PATH', '/usr/share/pdfminer/'),
os.path.join(os.path.dirname(__file__), 'cmap'),)
for directory in cmap_paths:
path = os.path.join(directory, filename)
if os.path.exists(path):
gzfile = gzip.open(path)
try:
return type(str(name), (), marshal.loads(gzfile.read()))
finally:
gzfile.close()
else:
raise CMapDB.CMapNotFound(name)
@classmethod
def get_cmap(klass, name):
if name == 'Identity-H':
return IdentityCMap(WMode=0)
elif name == 'Identity-V':
return IdentityCMap(WMode=1)
try:
return klass._cmap_cache[name]
except KeyError:
pass
data = klass._load_data(name)
klass._cmap_cache[name] = cmap = PyCMap(name, data)
return cmap
@classmethod
def get_unicode_map(klass, name, vertical=False):
try:
return klass._umap_cache[name][vertical]
except KeyError:
pass
data = klass._load_data('to-unicode-%s' % name)
klass._umap_cache[name] = umaps = [PyUnicodeMap(name, data, v) for v in (False, True)]
return umaps[vertical]
## CMapParser
##
| CMapDB |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bandit/S105.py | {
"start": 632,
"end": 1347
} | class ____:
password = "s3cr3t"
safe = password
MyClass.password = "s3cr3t"
MyClass._pass = "s3cr3t"
MyClass.passwd = "s3cr3t"
MyClass.pwd = "s3cr3t"
MyClass.secret = "s3cr3t"
MyClass.token = "s3cr3t"
MyClass.secrete = "s3cr3t"
password == "s3cr3t"
_pass == "s3cr3t"
passwd == "s3cr3t"
pwd == "s3cr3t"
secret == "s3cr3t"
token == "s3cr3t"
secrete == "s3cr3t"
password == safe == "s3cr3t"
if token == "1\n2":
pass
if token == "3\t4":
pass
if token == "5\r6":
pass
# These should not be flagged
passed_msg = "You have passed!"
compassion = "Please don't match!"
impassable = "You shall not pass!"
passwords = ""
PASSWORDS = ""
passphrases = ""
PassPhrases = ""
tokens = ""
secrets = ""
| MyClass |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_new_jersey_zip.py | {
"start": 757,
"end": 1766
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_new_jersey_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_new_jersey_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidNewJerseyZip |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/utils/time_window.py | {
"start": 2189,
"end": 10468
} | class ____(
NamedTuple(
"_PersistedTimeWindow", [("start", TimestampWithTimezone), ("end", TimestampWithTimezone)]
)
):
"""Internal serialized representation of a time interval that is closed at the
start and open at the end.
"""
def __new__(
cls,
start: TimestampWithTimezone,
end: TimestampWithTimezone,
):
return super(cls, PersistedTimeWindow).__new__(
cls,
start=check.inst_param(start, "start", TimestampWithTimezone),
end=check.inst_param(end, "end", TimestampWithTimezone),
)
@property
def start_timestamp(self) -> float:
return self._asdict()["start"].timestamp
@property
def start_timestamp_with_timezone(self) -> TimestampWithTimezone:
return self._asdict()["start"]
@property
def end_timestamp_with_timezone(self) -> TimestampWithTimezone:
return self._asdict()["end"]
@property
def end_timestamp(self) -> float:
return self._asdict()["end"].timestamp
@cached_property
def start(self) -> datetime: # pyright: ignore[reportIncompatibleVariableOverride]
start_timestamp_with_timezone = self._asdict()["start"]
return datetime.fromtimestamp(
start_timestamp_with_timezone.timestamp,
tz=get_timezone(start_timestamp_with_timezone.timezone),
)
@cached_property
def end(self) -> datetime: # pyright: ignore[reportIncompatibleVariableOverride]
end_timestamp_with_timezone = self._asdict()["end"]
return datetime.fromtimestamp(
end_timestamp_with_timezone.timestamp,
tz=get_timezone(end_timestamp_with_timezone.timezone),
)
@staticmethod
def from_public_time_window(tw: TimeWindow, timezone: str):
return PersistedTimeWindow(
TimestampWithTimezone(tw.start.timestamp(), timezone),
TimestampWithTimezone(tw.end.timestamp(), timezone),
)
def subtract(self, other: "PersistedTimeWindow") -> Sequence["PersistedTimeWindow"]:
other_start_timestamp = other.start_timestamp
start_timestamp = self.start_timestamp
other_end_timestamp = other.end_timestamp
end_timestamp = self.end_timestamp
# Case where the two don't intersect at all - just return self
# Note that this assumes end is exclusive
if end_timestamp <= other_start_timestamp or other_end_timestamp <= start_timestamp:
return [self]
windows = []
if other_start_timestamp > start_timestamp:
windows.append(
PersistedTimeWindow(start=self._asdict()["start"], end=other._asdict()["start"]),
)
if other_end_timestamp < end_timestamp:
windows.append(
PersistedTimeWindow(start=other._asdict()["end"], end=self._asdict()["end"])
)
return windows
def to_public_time_window(self) -> TimeWindow:
"""Used for exposing TimeWindows over the public Dagster API."""
return TimeWindow(start=self.start, end=self.end)
def _flatten(
high_pri_time_windows: list[PartitionTimeWindowStatus],
low_pri_time_windows: list[PartitionTimeWindowStatus],
) -> list[PartitionTimeWindowStatus]:
high_pri_time_windows = sorted(high_pri_time_windows, key=lambda t: t.time_window.start)
low_pri_time_windows = sorted(low_pri_time_windows, key=lambda t: t.time_window.start)
high_pri_idx = 0
low_pri_idx = 0
filtered_low_pri: list[PartitionTimeWindowStatus] = []
# slice and dice the low pri time windows so there's no overlap with high pri
while True:
if low_pri_idx >= len(low_pri_time_windows):
# reached end of materialized
break
if high_pri_idx >= len(high_pri_time_windows):
# reached end of failed, add all remaining materialized bc there's no overlap
filtered_low_pri.extend(low_pri_time_windows[low_pri_idx:])
break
low_pri_tw = low_pri_time_windows[low_pri_idx]
high_pri_tw = high_pri_time_windows[high_pri_idx]
if low_pri_tw.time_window.start.timestamp() < high_pri_tw.time_window.start.timestamp():
if low_pri_tw.time_window.end.timestamp() <= high_pri_tw.time_window.start.timestamp():
# low_pri_tw is entirely before high pri
filtered_low_pri.append(low_pri_tw)
low_pri_idx += 1
else:
# high pri cuts the low pri short
filtered_low_pri.append(
PartitionTimeWindowStatus(
TimeWindow(
low_pri_tw.time_window.start,
high_pri_tw.time_window.start,
),
low_pri_tw.status,
)
)
if low_pri_tw.time_window.end.timestamp() > high_pri_tw.time_window.end.timestamp():
# the low pri time window will continue on the other end of the high pri
# and get split in two. Modify low_pri[low_pri_idx] to be
# the second half of the low pri time window. It will be added in the next iteration.
# (don't add it now, because we need to check if it overlaps with the next high pri)
low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(
TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),
low_pri_tw.status,
)
high_pri_idx += 1
else:
# the rest of the low pri time window is inside the high pri time window
low_pri_idx += 1
else:
if low_pri_tw.time_window.start.timestamp() >= high_pri_tw.time_window.end.timestamp():
# high pri is entirely before low pri. The next high pri may overlap
high_pri_idx += 1
elif low_pri_tw.time_window.end.timestamp() <= high_pri_tw.time_window.end.timestamp():
# low pri is entirely within high pri, skip it
low_pri_idx += 1
else:
# high pri cuts out the start of the low pri. It will continue on the other end.
# Modify low_pri[low_pri_idx] to shorten the start. It will be added
# in the next iteration. (don't add it now, because we need to check if it overlaps with the next high pri)
low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(
TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),
low_pri_tw.status,
)
high_pri_idx += 1
# combine the high pri windwos with the filtered low pri windows
flattened_time_windows = high_pri_time_windows
flattened_time_windows.extend(filtered_low_pri)
flattened_time_windows.sort(key=lambda t: t.time_window.start)
return flattened_time_windows
PARTITION_RANGE_STATUS_PRIORITY = [
PartitionRangeStatus.MATERIALIZING,
PartitionRangeStatus.FAILED,
PartitionRangeStatus.MATERIALIZED,
]
def fetch_flattened_time_window_ranges(
subsets: Mapping[PartitionRangeStatus, "TimeWindowPartitionsSubset"],
) -> Sequence[PartitionTimeWindowStatus]:
"""Given potentially overlapping subsets, return a flattened list of timewindows where the highest priority status wins
on overlaps.
"""
prioritized_subsets = sorted(
[(status, subset) for status, subset in subsets.items()],
key=lambda t: PARTITION_RANGE_STATUS_PRIORITY.index(t[0]),
)
# progressively add lower priority time windows to the list of higher priority time windows
flattened_time_window_statuses = []
for status, subset in prioritized_subsets:
subset_time_window_statuses = [
PartitionTimeWindowStatus(tw.to_public_time_window(), status)
for tw in subset.included_time_windows
]
flattened_time_window_statuses = _flatten(
flattened_time_window_statuses, subset_time_window_statuses
)
return flattened_time_window_statuses
| PersistedTimeWindow |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/pslint.py | {
"start": 605,
"end": 3211
} | class ____(SanityVersionNeutral):
"""Sanity test using PSScriptAnalyzer."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'AnsibleTest'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
if not find_executable('pwsh', required='warning'):
return SanitySkipped(self.name)
cmds = []
if args.controller.is_managed or args.requirements:
cmds.append(['pwsh', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.pslint.ps1')])
cmds.append(['pwsh', os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
stdout = ''
for cmd in cmds:
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
severity = [
'Information',
'Warning',
'Error',
'ParseError',
]
cwd = data_context().content.root + '/'
# replace unicode smart quotes and ellipsis with ascii versions
stdout = re.sub('[\u2018\u2019]', "'", stdout)
stdout = re.sub('[\u201c\u201d]', '"', stdout)
stdout = re.sub('[\u2026]', '...', stdout)
messages = json.loads(stdout)
errors = [SanityMessage(
code=m['RuleName'],
message=m['Message'],
path=m['ScriptPath'].replace(cwd, ''),
line=m['Line'] or 0,
column=m['Column'] or 0,
level=severity[m['Severity']],
) for m in messages]
errors = settings.process_errors(errors, paths)
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
| PslintTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/box/views.py | {
"start": 181,
"end": 1066
} | class ____(OAuth2Adapter):
provider_id = "box"
access_token_url = "https://api.box.com/oauth2/token" # nosec
authorize_url = "https://account.box.com/api/oauth2/authorize"
profile_url = "https://api.box.com/2.0/users/me"
redirect_uri_protocol = None
def complete_login(self, request, app, token, **kwargs):
extra_data = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"access_token": token.token})
)
# This only here because of weird response from the test suite
if isinstance(extra_data, list):
extra_data = extra_data[0]
return self.get_provider().sociallogin_from_response(request, extra_data.json())
oauth_login = OAuth2LoginView.adapter_view(BoxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(BoxOAuth2Adapter)
| BoxOAuth2Adapter |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 215524,
"end": 220200
} | class ____(TestCase):
def test_basics(self):
extract = mi.extract
data = 'abcdefghijklmnopqrstuvwxyz'
# Test iterator inputs, increasing and decreasing indices, and repeats.
self.assertEqual(
list(extract(iter(data), iter([7, 4, 11, 11, 14]))),
['h', 'e', 'l', 'l', 'o'],
)
# Empty indices
self.assertEqual(list(extract(iter(data), iter([]))), [])
# Result is an iterator
iterator = extract('abc', [0, 1, 2])
self.assertTrue(hasattr(iterator, '__next__'))
# Error cases
with self.assertRaises(TypeError):
list(extract(None, [])) # Non-iterable data source
with self.assertRaises(TypeError):
list(extract(data, None)) # Non-iterable indices
with self.assertRaises(ValueError):
list(extract(data, [0.0, 1.0, 2.0])) # Non-integer indices
with self.assertRaises(ValueError):
list(extract(data, [1, 2, -3])) # Negative indices
with self.assertRaises(IndexError):
list(extract(data, [1, 2, len(data)])) # Indices out of range
def test_negative_one_bug(self):
# When the lowest index was exactly -1, it matched the initial
# iterator_position of -1 giving a zero advance step.
extract = mi.extract
with self.assertRaises(ValueError):
list(extract('abcdefg', [1, 2, -1]))
def test_none_value_bug(self):
# The buffer used to be a list with unused slots marked with None.
# The mark got conflated with None values in the data stream.
extract = mi.extract
data = ['a', 'b', 'None', 'c', 'd']
self.assertEqual(list(extract(data, range(5))), data)
def test_all_orderings(self):
# Thorough test for all cases of five indices to detect
# obscure corner case bugs.
extract = mi.extract
data = 'abcdefg'
for indices in product(range(6), repeat=5):
with self.subTest(indices=indices):
actual = tuple(extract(data, indices))
expected = itemgetter(*indices)(data)
self.assertEqual(actual, expected)
def test_early_free(self):
# No references are held for skipped values or for previously
# emitted values regardless of how long they were in the buffer.
extract = mi.extract
class TrackDels(str):
def __del__(self):
dead.add(str(self))
dead = set()
iterator = extract(map(TrackDels, 'ABCDEF'), [3, 2, 4, 5])
value = next(iterator)
gc.collect() # Force collection on PyPy.
self.assertEqual(value, 'D') # Returns D. Buffered C is alive.
self.assertEqual(dead, {'A', 'B'}) # A and B are dead.
value = next(iterator)
gc.collect() # Force collection on PyPy
self.assertEqual(value, 'C') # Returns C.
value = next(iterator)
gc.collect() # Force collection on PyPy
self.assertEqual(value, 'E') # Returns E.
self.assertEqual(dead, {'A', 'B', 'D', 'C'}) # D and C are now dead.
def test_monotonic(self):
collatz = mi.iterate(lambda x: 3 * x + 1 if x % 2 == 1 else x // 2, 42)
indices = count(0, 2)
self.assertEqual(
mi.take(3, mi.extract(collatz, indices, monotonic=True)),
[42, 64, 16],
)
self.assertEqual(next(collatz), 8)
self.assertEqual(next(indices), 6)
# Finite Inputs
self.assertEqual(
list(mi.extract('abcdefgh', [0, 2, 4], monotonic=True)),
['a', 'c', 'e'],
)
with self.assertRaises(IndexError):
list(mi.extract('abcdefgh', [0, 2, 40], monotonic=True))
# Error cases
with self.assertRaises(ValueError):
list(
mi.extract('abcdefg', [2, 4, 3], monotonic=True)
) # decreasing index
with self.assertRaises(ValueError):
list(
mi.extract('abcdefg', [-1, 0, 1], monotonic=True)
) # negative index
def test_lazy_consumption(self):
extract = mi.extract
input_stream = mi.peekable(iter('ABCDEFGHIJKLM'))
iterator = extract(input_stream, [4, 2, 10])
self.assertEqual(next(iterator), 'E') # C is still buffered
self.assertEqual(input_stream.peek(), 'F')
self.assertEqual(next(iterator), 'C')
self.assertEqual(input_stream.peek(), 'F')
# Infinite input
self.assertEqual(
list(extract(count(), [5, 7, 3, 9, 4])), [5, 7, 3, 9, 4]
)
| ExtractTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_column10.py | {
"start": 315,
"end": 2274
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_column01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column_pixels("A:A", 1)
worksheet.set_column_pixels("B:B", 2)
worksheet.set_column_pixels("C:C", 3)
worksheet.set_column_pixels("D:D", 4)
worksheet.set_column_pixels("E:E", 5)
worksheet.set_column_pixels("F:F", 6)
worksheet.set_column_pixels("G:G", 7)
worksheet.set_column_pixels("H:H", 8)
worksheet.set_column_pixels("I:I", 9)
worksheet.set_column_pixels("J:J", 10)
worksheet.set_column_pixels("K:K", 11)
worksheet.set_column_pixels("L:L", 12)
worksheet.set_column_pixels("M:M", 13)
worksheet.set_column_pixels("N:N", 14)
worksheet.set_column_pixels("O:O", 15)
worksheet.set_column_pixels("P:P", 16)
worksheet.set_column_pixels("Q:Q", 17)
worksheet.set_column_pixels("R:R", 18)
worksheet.set_column_pixels("S:S", 19)
worksheet.set_column_pixels("T:T", 20)
worksheet.set_column_pixels("U:U", 21)
worksheet.set_column_pixels("V:V", 22)
worksheet.set_column_pixels("W:W", 23)
worksheet.set_column_pixels("X:X", 24)
worksheet.set_column_pixels("Y:Y", 25)
worksheet.set_column_pixels("Z:Z", 26)
worksheet.set_column_pixels("AB:AB", 65)
worksheet.set_column_pixels("AC:AC", 66)
worksheet.set_column_pixels("AD:AD", 67)
worksheet.set_column_pixels("AE:AE", 68)
worksheet.set_column_pixels("AF:AF", 69)
worksheet.set_column_pixels("AG:AG", 70)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/base_multi_modal_retriever.py | {
"start": 330,
"end": 1843
} | class ____(BaseRetriever, BaseImageRetriever):
"""Multi Modal base retriever."""
@abstractmethod
def text_retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""
Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given image query.
Implemented by the user.
"""
@abstractmethod
async def atext_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def atext_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def aimage_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given image query.
Implemented by the user.
"""
| MultiModalRetriever |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink41.py | {
"start": 315,
"end": 938
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink41.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9",
self.image_dir + "red.png",
{"url": "https://github.com/jmcnamara<foo>"},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/fx/experimental/meta_tracer.py | {
"start": 4721,
"end": 10776
} | class ____(torch.fx.Tracer):
allow_insert_stateless_mods: bool = True
_TORCH_METHODS_TO_PATCH = ["arange", "zeros", "ones", "full_like", "eye"]
def create_proxy(
self,
kind,
target,
args,
kwargs,
name=None,
type_expr=None,
proxy_factory_fn=None,
):
rv = super().create_proxy(
kind,
target,
args,
kwargs,
name,
type_expr,
# pyrefly: ignore [bad-argument-type]
proxy_factory_fn,
)
if kind == "placeholder" and target in self.meta_args:
rv.install_tensor_meta(self.meta_args[target])
return rv
if target in self.orig_fns:
# NOTE: tensor constructors in PyTorch define the `device` argument as
# *kwargs-only*. That is why this works. If you add methods to
# _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
# this will break and you will likely see issues where we cannot infer
# the size of the output.
if "device" in kwargs:
kwargs["device"] = "meta"
try:
args_metas = torch.fx.node.map_aggregate(args, proxys_to_metas)
kwargs_metas = torch.fx.node.map_aggregate(kwargs, proxys_to_metas)
if kind == "call_function":
meta_target = manual_meta_overrides.get(target, target)
# pyrefly: ignore [not-callable]
meta_out = meta_target(*args_metas, **kwargs_metas)
elif kind == "call_method":
meta_target = getattr(args_metas[0], target) # type: ignore[index]
meta_out = meta_target(*args_metas[1:], **kwargs_metas) # type: ignore[index]
elif kind == "call_module":
assert hasattr(self, "orig_forward")
self._disable_module_getattr = True
try:
mod = self.root.get_submodule(target)
mod_type = type(mod)
if mod_type in manual_meta_overrides:
meta_out = manual_meta_overrides[mod_type](
mod, *args_metas, **kwargs_metas
) # type: ignore[misc, arg-type]
else:
meta_out = self.orig_forward(*args_metas, **kwargs_metas)
finally:
self._disable_module_getattr = False
elif kind == "get_attr":
self._disable_module_getattr = True
try:
attr_itr = self.root
atoms = target.split(".")
for atom in atoms:
attr_itr = getattr(attr_itr, atom)
assert isinstance(attr_itr, torch.Tensor)
meta_out = attr_itr.to(device="meta")
finally:
self._disable_module_getattr = False
else:
return rv
# TODO
assert isinstance(rv, torch.fx.Proxy), "Dont support composite output yet"
rv.install_tensor_meta(meta_out)
except Exception as e:
warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}")
return rv
def getattr(self, attr, attr_val, parameter_proxy_cache):
if getattr(self, "_disable_module_getattr", False):
return attr_val
else:
return super().getattr(attr, attr_val, parameter_proxy_cache)
def call_module(self, m, forward, args, kwargs):
self.orig_forward = forward
return super().call_module(m, forward, args, kwargs)
def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str:
"""
Helper method which tries to insert a module that was not declared as submodule.
"""
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f"{mod_name}_{idx}"
while hasattr(self.root, path):
path = f"{mod_name}_{idx}"
idx += 1
self.root.add_module(path, mod)
return path
def path_of_module(self, mod: torch.nn.Module) -> str:
try:
return super().path_of_module(mod)
except NameError:
if (
self.allow_insert_stateless_mods
and len(list(mod.parameters())) == 0
and len(list(mod.buffers())) == 0
):
path = self._insert_module_as_submodule(mod)
self.prev_module = path
return path
raise
def proxy(self, node):
return MetaProxy(node, self)
def trace(self, root, meta_args: dict[str, torch.Tensor], concrete_args=None): # type: ignore[override]
assert isinstance(meta_args, dict)
self.meta_args = meta_args
self.patched_torch_methods = {
target: gen_constructor_wrapper(getattr(torch, target))
for target in self._TORCH_METHODS_TO_PATCH
}
self.orig_fns = set()
for name, (wrapper, orig) in self.patched_torch_methods.items():
setattr(torch, name, wrapper)
self.orig_fns.add(orig)
try:
graph = super().trace(root, concrete_args)
graph._tracer_extras = {"meta_args": meta_args}
return graph
finally:
for name, (_, orig) in self.patched_torch_methods.items():
setattr(torch, name, orig)
def symbolic_trace(
root: Union[torch.nn.Module, Callable[..., Any]],
meta_args: Optional[dict[str, torch.Tensor]] = None,
concrete_args: Optional[dict[str, Any]] = None,
) -> torch.fx.GraphModule:
tracer = MetaTracer()
graph = tracer.trace(root, meta_args, concrete_args) # type: ignore[arg-type]
name = (
root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
)
gm = torch.fx.GraphModule(tracer.root, graph, name)
return gm
| MetaTracer |
python | kamyu104__LeetCode-Solutions | Python/count-number-of-bad-pairs.py | {
"start": 63,
"end": 392
} | class ____(object):
def countBadPairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = len(nums)*(len(nums)-1)//2
cnt = collections.Counter()
for i, x in enumerate(nums):
result -= cnt[x-i]
cnt[x-i] += 1
return result
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/runtime_representations.py | {
"start": 244,
"end": 2339
} | class ____:
webserver_url: str
dag_id: str
run_id: str
metadata: dict[str, Any]
@property
def note(self) -> str:
return self.metadata.get("note") or ""
@property
def dag_handle(self) -> "DagHandle":
from dagster_airlift.core.serialization.serialized_data import DagHandle
return DagHandle(dag_id=self.dag_id)
@property
def url(self) -> str:
return f"{self.webserver_url}/dags/{self.dag_id}/grid?dag_run_id={self.run_id}&tab=details"
@property
def success(self) -> bool:
return self.metadata["state"] == "success"
@property
def finished(self) -> bool:
from dagster_airlift.core.airflow_instance import TERMINAL_STATES
return self.state in TERMINAL_STATES
@property
def state(self) -> str:
return self.metadata["state"]
@property
def run_type(self) -> str:
return self.metadata["run_type"]
@property
def config(self) -> dict[str, Any]:
return self.metadata["conf"]
@property
def logical_date(self) -> datetime.datetime:
"""Returns the airflow-coined "logical date" from the dag run metadata.
The logical date refers to the starting time of the "data interval" that the dag run is processing.
In airflow < 2.2, this was set as the execution_date parameter in the dag run metadata.
"""
# In airflow < 2.2, execution_date is set instead of logical_date.
logical_date_str = check.not_none(
self.metadata.get("logical_date") or self.metadata.get("execution_date"),
"Expected one of execution_date or logical_date to be returned from the airflow rest API when querying for dag information.",
)
return datetime.datetime.fromisoformat(logical_date_str)
@property
def start_date(self) -> datetime.datetime:
return datetime.datetime.fromisoformat(self.metadata["start_date"])
@property
def end_date(self) -> datetime.datetime:
return datetime.datetime.fromisoformat(self.metadata["end_date"])
@record
| DagRun |
python | django__django | tests/migrations/test_migrations_plan/0005_fifth.py | {
"start": 121,
"end": 413
} | class ____(migrations.Migration):
dependencies = [
("migrations", "0004_fourth"),
]
operations = [
migrations.RunPython(migrations.RunPython.noop),
migrations.RunPython(grow_tail),
migrations.RunPython(feed, migrations.RunPython.noop),
]
| Migration |
python | getsentry__sentry | src/sentry/hybridcloud/services/tombstone/model.py | {
"start": 280,
"end": 360
} | class ____(RpcModel):
table_name: str = ""
identifier: int = -1
| RpcTombstone |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 5521,
"end": 6447
} | class ____(ASTNode):
"""Node that represents a terminal symbol."""
KIND = NodeKind.LEAF
def __init__(self, name='EPSILON', value=''):
ASTNode.__init__(self)
self.name = name
self.value = value
def compute_position(self, offset):
value = self.text()
new_offset, mark_for_position = _compute_offset_str(offset, value)
self.mark_for_position = mark_for_position
if len(self.value) == 1:
self.position = (offset,)
else:
self.position = (offset, new_offset)
return new_offset
def text(self):
text = BACKSLASH_REPLACE_REGEX.sub(r'\2', self.value)
if self.name == 'left_curly_name':
text = text[1:]
return text
def __str__(self):
return 'LeafNode({0}: {1})'.format(self.name, self.value)
def __repr__(self):
return r'{0}'.format(self.__str__())
| LeafNode |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/cloud_composer.py | {
"start": 13077,
"end": 34096
} | class ____(BaseSensorOperator):
"""
Waits for a different DAG, task group, or task to complete for a specific composer environment.
If both `composer_external_task_group_id` and `composer_external_task_id` are ``None`` (default), the sensor
waits for the DAG.
Values for `composer_external_task_group_id` and `composer_external_task_id` can't be set at the same time.
By default, the CloudComposerExternalTaskSensor will wait for the external task to
succeed, at which point it will also succeed. However, by default it will
*not* fail if the external task fails, but will continue to check the status
until the sensor times out (thus giving you time to retry the external task
without also having to clear the sensor).
By default, the CloudComposerExternalTaskSensor will not skip if the external task skips.
To change this, simply set ``skipped_states=[TaskInstanceState.SKIPPED]``.
Note that if you are monitoring multiple tasks, and one enters error state
and the other enters a skipped state, then the external task will react to
whichever one it sees first. If both happen together, then the failed state
takes priority.
It is possible to alter the default behavior by setting states which
cause the sensor to fail, e.g. by setting ``allowed_states=[DagRunState.FAILED]``
and ``failed_states=[DagRunState.SUCCESS]`` you will flip the behaviour to
get a sensor which goes green when the external task *fails* and immediately
goes red if the external task *succeeds*!
Note that ``soft_fail`` is respected when examining the failed_states. Thus
if the external task enters a failed state and ``soft_fail == True`` the
sensor will _skip_ rather than fail. As a result, setting ``soft_fail=True``
and ``failed_states=[DagRunState.SKIPPED]`` will result in the sensor
skipping if the external task skips. However, this is a contrived
example---consider using ``skipped_states`` if you would like this
behaviour. Using ``skipped_states`` allows the sensor to skip if the target
fails, but still enter failed state on timeout. Using ``soft_fail == True``
as above will cause the sensor to skip if the target fails, but also if it
times out.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: The name of the Composer environment.
:param composer_external_dag_id: The dag_id that contains the task you want to
wait for. (templated)
:param composer_external_task_id: The task_id that contains the task you want to
wait for. (templated)
:param composer_external_task_ids: The list of task_ids that you want to wait for. (templated)
If ``None`` (default value) the sensor waits for the DAG. Either
composer_external_task_id or composer_external_task_ids can be passed to
CloudComposerExternalTaskSensor, but not both.
:param composer_external_task_group_id: The task_group_id that contains the task you want to
wait for. (templated)
:param allowed_states: Iterable of allowed states, default is ``['success']``
:param skipped_states: Iterable of states to make this task mark as skipped, default is ``None``
:param failed_states: Iterable of failed or dis-allowed states, default is ``None``
:param execution_range: execution DAGs time range. Sensor checks DAGs states only for DAGs which were
started in this time range. For yesterday, use [positive!] datetime.timedelta(days=1).
For future, use [negative!] datetime.timedelta(days=-1). For specific time, use list of
datetimes [datetime(2024,3,22,11,0,0), datetime(2024,3,22,12,0,0)].
Or [datetime(2024,3,22,0,0,0)] in this case sensor will check for states from specific time in the
past till current time execution.
Default value datetime.timedelta(days=1).
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param poll_interval: Optional: Control the rate of the poll for the result of deferrable run.
:param deferrable: Run sensor in deferrable mode.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"composer_external_dag_id",
"composer_external_task_id",
"composer_external_task_ids",
"composer_external_task_group_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
composer_external_dag_id: str,
composer_external_task_id: str | None = None,
composer_external_task_ids: Collection[str] | None = None,
composer_external_task_group_id: str | None = None,
allowed_states: Iterable[str] | None = None,
skipped_states: Iterable[str] | None = None,
failed_states: Iterable[str] | None = None,
execution_range: timedelta | list[datetime] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.allowed_states = list(allowed_states) if allowed_states else [TaskInstanceState.SUCCESS.value]
self.skipped_states = list(skipped_states) if skipped_states else []
self.failed_states = list(failed_states) if failed_states else []
total_states = set(self.allowed_states + self.skipped_states + self.failed_states)
if len(total_states) != len(self.allowed_states) + len(self.skipped_states) + len(self.failed_states):
raise DuplicateStateError(
"Duplicate values provided across allowed_states, skipped_states and failed_states."
)
# convert [] to None
if not composer_external_task_ids:
composer_external_task_ids = None
# can't set both single task id and a list of task ids
if composer_external_task_id is not None and composer_external_task_ids is not None:
raise ValueError(
"Only one of `composer_external_task_id` or `composer_external_task_ids` may "
"be provided to CloudComposerExternalTaskSensor; "
"use `composer_external_task_id` or `composer_external_task_ids` or `composer_external_task_group_id`."
)
# since both not set, convert the single id to a 1-elt list - from here on, we only consider the list
if composer_external_task_id is not None:
composer_external_task_ids = [composer_external_task_id]
if composer_external_task_group_id is not None and composer_external_task_ids is not None:
raise ValueError(
"Only one of `composer_external_task_group_id` or `composer_external_task_ids` may "
"be provided to CloudComposerExternalTaskSensor; "
"use `composer_external_task_id` or `composer_external_task_ids` or `composer_external_task_group_id`."
)
# check the requested states are all valid states for the target type, be it dag or task
if composer_external_task_ids or composer_external_task_group_id:
if not total_states <= set(State.task_states):
raise ValueError(
"Valid values for `allowed_states`, `skipped_states` and `failed_states` "
"when `composer_external_task_id` or `composer_external_task_ids` or `composer_external_task_group_id` "
f"is not `None`: {State.task_states}"
)
elif not total_states <= set(State.dag_states):
raise ValueError(
"Valid values for `allowed_states`, `skipped_states` and `failed_states` "
f"when `composer_external_task_id` and `composer_external_task_group_id` is `None`: {State.dag_states}"
)
self.execution_range = execution_range
self.composer_external_dag_id = composer_external_dag_id
self.composer_external_task_id = composer_external_task_id
self.composer_external_task_ids = composer_external_task_ids
self.composer_external_task_group_id = composer_external_task_group_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poll_interval = poll_interval
def _get_logical_dates(self, context) -> tuple[datetime, datetime]:
logical_date = context.get("logical_date", None)
if logical_date is None:
raise RuntimeError(
"logical_date is None. Please make sure the sensor is not used in an asset-triggered Dag. "
"CloudComposerDAGRunSensor was designed to be used in time-based scheduled Dags only, "
"and asset-triggered Dags do not have logical_date. "
)
if isinstance(self.execution_range, timedelta):
if self.execution_range < timedelta(0):
return logical_date, logical_date - self.execution_range
return logical_date - self.execution_range, logical_date
if isinstance(self.execution_range, list) and len(self.execution_range) > 0:
return self.execution_range[0], self.execution_range[1] if len(
self.execution_range
) > 1 else logical_date
return logical_date - timedelta(1), logical_date
def poke(self, context: Context) -> bool:
start_date, end_date = self._get_logical_dates(context)
task_instances = self._get_task_instances(
start_date=start_date.strftime("%Y-%m-%dT%H:%M:%SZ"),
end_date=end_date.strftime("%Y-%m-%dT%H:%M:%SZ"),
)
if len(task_instances) == 0:
self.log.info("Task Instances are empty. Sensor waits for task instances...")
return False
if self.failed_states:
external_task_status = self._check_task_instances_states(
task_instances=task_instances,
start_date=start_date,
end_date=end_date,
states=self.failed_states,
)
self._handle_failed_states(external_task_status)
if self.skipped_states:
external_task_status = self._check_task_instances_states(
task_instances=task_instances,
start_date=start_date,
end_date=end_date,
states=self.skipped_states,
)
self._handle_skipped_states(external_task_status)
self.log.info("Sensor waits for allowed states: %s", self.allowed_states)
external_task_status = self._check_task_instances_states(
task_instances=task_instances,
start_date=start_date,
end_date=end_date,
states=self.allowed_states,
)
return external_task_status
def _get_task_instances(self, start_date: str, end_date: str) -> list[dict]:
"""Get the list of task instances."""
try:
environment = self.hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
timeout=self.timeout,
)
except NotFound as not_found_err:
self.log.info("The Composer environment %s does not exist.", self.environment_id)
raise AirflowException(not_found_err)
composer_airflow_uri = environment.config.airflow_uri
self.log.info(
"Pulling the DAG '%s' task instances from the '%s' environment...",
self.composer_external_dag_id,
self.environment_id,
)
task_instances_response = self.hook.get_task_instances(
composer_airflow_uri=composer_airflow_uri,
composer_dag_id=self.composer_external_dag_id,
query_parameters={
"execution_date_gte"
if self._composer_airflow_version < 3
else "logical_date_gte": start_date,
"execution_date_lte" if self._composer_airflow_version < 3 else "logical_date_lte": end_date,
},
timeout=self.timeout,
)
task_instances = task_instances_response["task_instances"]
if self.composer_external_task_ids:
task_instances = [
task_instance
for task_instance in task_instances
if task_instance["task_id"] in self.composer_external_task_ids
]
elif self.composer_external_task_group_id:
task_instances = [
task_instance
for task_instance in task_instances
if self.composer_external_task_group_id in task_instance["task_id"].split(".")
]
return task_instances
def _check_task_instances_states(
self,
task_instances: list[dict],
start_date: datetime,
end_date: datetime,
states: Iterable[str],
) -> bool:
for task_instance in task_instances:
if (
start_date.timestamp()
< parser.parse(
task_instance["execution_date" if self._composer_airflow_version < 3 else "logical_date"]
).timestamp()
< end_date.timestamp()
) and task_instance["state"] not in states:
return False
return True
def _get_composer_airflow_version(self) -> int:
"""Return Composer Airflow version."""
environment_obj = self.hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
)
environment_config = Environment.to_dict(environment_obj)
image_version = environment_config["config"]["software_config"]["image_version"]
return int(image_version.split("airflow-")[1].split(".")[0])
def _handle_failed_states(self, failed_status: bool) -> None:
"""Handle failed states and raise appropriate exceptions."""
if failed_status:
if self.composer_external_task_ids:
if self.soft_fail:
raise AirflowSkipException(
f"Some of the external tasks '{self.composer_external_task_ids}' "
f"in DAG '{self.composer_external_dag_id}' failed. Skipping due to soft_fail."
)
raise ExternalTaskFailedError(
f"Some of the external tasks '{self.composer_external_task_ids}' "
f"in DAG '{self.composer_external_dag_id}' failed."
)
if self.composer_external_task_group_id:
if self.soft_fail:
raise AirflowSkipException(
f"The external task_group '{self.composer_external_task_group_id}' "
f"in DAG '{self.composer_external_dag_id}' failed. Skipping due to soft_fail."
)
raise ExternalTaskGroupFailedError(
f"The external task_group '{self.composer_external_task_group_id}' "
f"in DAG '{self.composer_external_dag_id}' failed."
)
if self.soft_fail:
raise AirflowSkipException(
f"The external DAG '{self.composer_external_dag_id}' failed. Skipping due to soft_fail."
)
raise ExternalDagFailedError(f"The external DAG '{self.composer_external_dag_id}' failed.")
def _handle_skipped_states(self, skipped_status: bool) -> None:
"""Handle skipped states and raise appropriate exceptions."""
if skipped_status:
if self.composer_external_task_ids:
raise AirflowSkipException(
f"Some of the external tasks '{self.composer_external_task_ids}' "
f"in DAG '{self.composer_external_dag_id}' reached a state in our states-to-skip-on list. Skipping."
)
if self.composer_external_task_group_id:
raise AirflowSkipException(
f"The external task_group '{self.composer_external_task_group_id}' "
f"in DAG '{self.composer_external_dag_id}' reached a state in our states-to-skip-on list. Skipping."
)
raise AirflowSkipException(
f"The external DAG '{self.composer_external_dag_id}' reached a state in our states-to-skip-on list. "
"Skipping."
)
def execute(self, context: Context) -> None:
self._composer_airflow_version = self._get_composer_airflow_version()
if self.composer_external_task_ids and len(self.composer_external_task_ids) > len(
set(self.composer_external_task_ids)
):
raise ValueError("Duplicate task_ids passed in composer_external_task_ids parameter")
if self.composer_external_task_ids:
self.log.info(
"Poking for tasks '%s' in dag '%s' on Composer environment '%s' ... ",
self.composer_external_task_ids,
self.composer_external_dag_id,
self.environment_id,
)
if self.composer_external_task_group_id:
self.log.info(
"Poking for task_group '%s' in dag '%s' on Composer environment '%s' ... ",
self.composer_external_task_group_id,
self.composer_external_dag_id,
self.environment_id,
)
if (
self.composer_external_dag_id
and not self.composer_external_task_group_id
and not self.composer_external_task_ids
):
self.log.info(
"Poking for DAG '%s' on Composer environment '%s' ... ",
self.composer_external_dag_id,
self.environment_id,
)
if self.deferrable:
start_date, end_date = self._get_logical_dates(context)
self.defer(
timeout=timedelta(seconds=self.timeout) if self.timeout else None,
trigger=CloudComposerExternalTaskTrigger(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
composer_external_dag_id=self.composer_external_dag_id,
composer_external_task_ids=self.composer_external_task_ids,
composer_external_task_group_id=self.composer_external_task_group_id,
start_date=start_date,
end_date=end_date,
allowed_states=self.allowed_states,
skipped_states=self.skipped_states,
failed_states=self.failed_states,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poll_interval=self.poll_interval,
composer_airflow_version=self._composer_airflow_version,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
super().execute(context)
def execute_complete(self, context: Context, event: dict):
if event and event["status"] == "error":
raise AirflowException(event["message"])
if event and event["status"] == "failed":
self._handle_failed_states(True)
elif event and event["status"] == "skipped":
self._handle_skipped_states(True)
self.log.info("External tasks for DAG '%s' has executed successfully.", self.composer_external_dag_id)
@cached_property
def hook(self) -> CloudComposerHook:
return CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
| CloudComposerExternalTaskSensor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-sheets/unit_tests/integration/test_source.py | {
"start": 12900,
"end": 52377
} | class ____(GoogleSheetsBaseTest):
@HttpMocker()
def test_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range")
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions")
first_property = "header_1"
second_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema(
{"properties": {first_property: {"type": ["null", "string"]}, second_property: {"type": ["null", "string"]}}}
)
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_11", second_property: "value_12"}
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_21", second_property: "value_22"}
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_when_read_empty_column_then_return_records(self, http_mocker: HttpMocker) -> None:
"""
The response from headers (first row) has columns "header_1 | header_2 | | address | address2" so everything after empty cell will be
discarded, in this case address and address2 shouldn't be part of the schema in records.
"""
test_file_base_name = "read_with_empty_column"
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{test_file_base_name}_{GET_SPREADSHEET_INFO}")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{test_file_base_name}_{GET_SHEETS_FIRST_ROW}")
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{test_file_base_name}_{GET_STREAM_DATA}")
first_property = "header_1"
second_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema(
{"properties": {first_property: {"type": ["null", "string"]}, second_property: {"type": ["null", "string"]}}}
)
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_11", second_property: "value_12"}
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_21", second_property: "value_22"}
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_when_read_with_duplicated_headers_then_return_records(self, http_mocker: HttpMocker):
""" "
header_2 will be deduplicated by appending cell position.
header_1 header_2 header_2 address address2
value_11 value_12 value_13 main main st
value_21 value_22 value_23 washington 3 colonial
It will correctly match row values and field/column names in read records.
"""
test_file_base_name = "read_duplicated_headers"
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{test_file_base_name}_{GET_SPREADSHEET_INFO}")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{test_file_base_name}_{GET_SHEETS_FIRST_ROW}")
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{test_file_base_name}_{GET_STREAM_DATA}")
first_property = "header_1"
second_property = "header_2_B1"
third_property = "header_2_C1"
fourth_property = "address"
fifth_property = "address2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema(
{
"properties": {
first_property: {"type": ["null", "string"]},
second_property: {"type": ["null", "string"]},
third_property: {"type": ["null", "string"]},
fourth_property: {"type": ["null", "string"]},
fifth_property: {"type": ["null", "string"]},
}
}
)
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY,
stream=_STREAM_NAME,
data={
first_property: "value_11",
second_property: "value_12",
third_property: "value_13",
fourth_property: "main",
fifth_property: "main st",
},
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY,
stream=_STREAM_NAME,
data={
first_property: "value_21",
second_property: "value_22",
third_property: "value_23",
fourth_property: "washington 3",
fifth_property: "colonial",
},
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_when_empty_rows_then_return_records(self, http_mocker: HttpMocker):
""" "
There are a few empty rows in the response that we shuld ignore
e.g.
id name normalized_name
7 Children children
12 Mechanical Santa mechanical santa
13 Tattoo Man tattoo man
16 DOCTOR ZITSOFSKY doctor zitsofsky
20 Students students
There are two empty rows between id 16 and 20 that we will not be present in read records
"""
test_file_base_name = "read_empty_rows"
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{test_file_base_name}_{GET_SPREADSHEET_INFO}")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{test_file_base_name}_{GET_SHEETS_FIRST_ROW}")
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{test_file_base_name}_{GET_STREAM_DATA}")
expected_properties = ["id", "name", "normalized_name"]
catalog_properties = {}
for property in expected_properties:
catalog_properties[property] = {"type": ["null", "string"]}
configured_catalog = (
CatalogBuilder()
.with_stream(ConfiguredAirbyteStreamBuilder().with_name(_STREAM_NAME).with_json_schema({"properties": catalog_properties}))
.build()
)
records_in_response = find_template(f"{test_file_base_name}_{GET_STREAM_DATA}", __file__)
empty_row_count = 0
expected_rows_found = 23
expected_empty_rows = 7
expected_records = []
for row in records_in_response["valueRanges"][0]["values"]:
if row:
expected_records += [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY,
stream=_STREAM_NAME,
data={expected_property: row_value for expected_property, row_value in zip(expected_properties, row)},
),
)
]
else:
empty_row_count += 1
assert empty_row_count == expected_empty_rows
assert len(expected_records) == expected_rows_found
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
assert len(output.records) == expected_rows_found
assert output.records == expected_records
@HttpMocker()
def test_when_read_by_batches_make_expected_requests(self, http_mocker: HttpMocker):
test_file_base_name = "read_by_batches"
batch_size = 10
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{test_file_base_name}_{GET_SPREADSHEET_INFO}")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{test_file_base_name}_{GET_SHEETS_FIRST_ROW}")
start_range = 2
for range_file_postfix in ("first_batch", "second_batch", "third_batch", "fourth_batch", "fifth_batch"):
end_range = start_range + batch_size
request_range = (start_range, end_range)
GoogleSheetsBaseTest.get_stream_data(
http_mocker, data_response_file=f"{test_file_base_name}_{GET_STREAM_DATA}_{range_file_postfix}", request_range=request_range
)
start_range += batch_size + 1
catalog_properties = {}
for expected_property in ["id", "name", "normalized_name"]:
catalog_properties[expected_property] = {"type": ["null", "string"]}
configured_catalog = (
CatalogBuilder()
.with_stream(ConfiguredAirbyteStreamBuilder().with_name(_STREAM_NAME).with_json_schema({"properties": catalog_properties}))
.build()
)
self._config["batch_size"] = batch_size
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
assert len(output.records) > 0
@HttpMocker()
def test_when_read_then_return_records_with_name_conversion(self, http_mocker: HttpMocker) -> None:
# will convert '1 тест' to '_1_test and 'header2' to 'header_2'
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "names_conversion_range")
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions")
first_expected_converted_property = "_1_test"
second_expected_converted_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema(
{
"properties": {
first_expected_converted_property: {"type": ["null", "string"]},
second_expected_converted_property: {"type": ["null", "string"]},
}
}
)
)
.build()
)
self._config["names_conversion"] = True
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY,
stream=_STREAM_NAME,
data={first_expected_converted_property: "value_11", second_expected_converted_property: "value_12"},
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY,
stream=_STREAM_NAME,
data={first_expected_converted_property: "value_21", second_expected_converted_property: "value_22"},
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_when_read_multiple_streams_return_records(self, http_mocker: HttpMocker) -> None:
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "multiple_streams_schemas_meta", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"multiple_streams_schemas_{_STREAM_NAME}_range", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"multiple_streams_schemas_{_B_STREAM_NAME}_range", 200, _B_STREAM_NAME)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"multiple_streams_schemas_{_C_STREAM_NAME}_range", 200, _C_STREAM_NAME)
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"multiple_streams_schemas_{_STREAM_NAME}_range_2")
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"multiple_streams_schemas_{_B_STREAM_NAME}_range_2", stream_name=_B_STREAM_NAME)
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"multiple_streams_schemas_{_C_STREAM_NAME}_range_2", stream_name=_C_STREAM_NAME)
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema({"properties": {"age": {"type": "string"}, "name": {"type": "string"}}})
)
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_B_STREAM_NAME)
.with_json_schema({"properties": {"email": {"type": "string"}, "name": {"type": "string"}}})
)
.with_stream(
ConfiguredAirbyteStreamBuilder().with_name(_C_STREAM_NAME).with_json_schema({"properties": {"address": {"type": "string"}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
assert len(output.records) == 9
assert len(output.state_messages) == 3
state_messages_streams = []
for state_message in output.state_messages:
state_messages_streams.append(state_message.state.stream.stream_descriptor.name)
assert _STREAM_NAME in state_messages_streams
assert _B_STREAM_NAME in state_messages_streams
assert _C_STREAM_NAME in state_messages_streams
expected_messages = []
for current_stream in [_STREAM_NAME, _B_STREAM_NAME, _C_STREAM_NAME]:
for current_status in [AirbyteStreamStatus.COMPLETE, AirbyteStreamStatus.RUNNING, AirbyteStreamStatus.STARTED]:
stream_descriptor = StreamDescriptor(name=current_stream, namespace=None)
stream_status = AirbyteStreamStatusTraceMessage(status=current_status, stream_descriptor=stream_descriptor)
airbyte_trace_message = AirbyteTraceMessage(type=TraceType.STREAM_STATUS, emitted_at=ANY, stream_status=stream_status)
airbyte_message = AirbyteMessage(type=Type.TRACE, trace=airbyte_trace_message)
expected_messages.append(airbyte_message)
assert len(output.trace_messages) == len(expected_messages)
for message in expected_messages:
assert message in output.trace_messages
@HttpMocker()
def test_when_read_single_stream_with_multiple_streams_available_return_records_of_requested_stream(
self, http_mocker: HttpMocker
) -> None:
""" "
Source has multiple sheets/stream but configured catalog will just request data for one sheet/stream
then we just get records for that stream.
"""
file_name_base = "multiple_streams_schemas"
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{file_name_base}_meta", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{file_name_base}_{_STREAM_NAME}_range", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{file_name_base}_{_B_STREAM_NAME}_range", 200, _B_STREAM_NAME)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{file_name_base}_{_C_STREAM_NAME}_range", 200, _C_STREAM_NAME)
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{file_name_base}_{_B_STREAM_NAME}_range_2", stream_name=_B_STREAM_NAME)
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_B_STREAM_NAME)
.with_json_schema({"properties": {"email": {"type": "string"}, "name": {"type": "string"}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
assert len(output.records) == 2
assert len(output.state_messages) == 1
state_messages_streams = []
for state_message in output.state_messages:
state_messages_streams.append(state_message.state.stream.stream_descriptor.name)
assert _STREAM_NAME not in state_messages_streams
assert _B_STREAM_NAME in state_messages_streams
assert _C_STREAM_NAME not in state_messages_streams
expected_messages = []
for current_stream in [_B_STREAM_NAME]:
for current_status in [AirbyteStreamStatus.COMPLETE, AirbyteStreamStatus.RUNNING, AirbyteStreamStatus.STARTED]:
stream_descriptor = StreamDescriptor(name=current_stream, namespace=None)
stream_status = AirbyteStreamStatusTraceMessage(status=current_status, stream_descriptor=stream_descriptor)
airbyte_trace_message = AirbyteTraceMessage(type=TraceType.STREAM_STATUS, emitted_at=ANY, stream_status=stream_status)
airbyte_message = AirbyteMessage(type=Type.TRACE, trace=airbyte_trace_message)
expected_messages.append(airbyte_message)
assert len(output.trace_messages) == len(expected_messages)
for message in expected_messages:
assert message in output.trace_messages
@HttpMocker()
def test_when_read_stream_is_not_available_then_is_marked_incomplete(self, http_mocker: HttpMocker) -> None:
"""
Configured catalog will include a streams that is not available in first row response, so it will be marked as incomplete.
"""
base_file_name = "multiple_streams_schemas"
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, f"{base_file_name}_meta", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{base_file_name}_{_STREAM_NAME}_range", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{base_file_name}_{_B_STREAM_NAME}_range", 200, _B_STREAM_NAME)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, f"{base_file_name}_{_C_STREAM_NAME}_range", 200, _C_STREAM_NAME)
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{base_file_name}_{_STREAM_NAME}_range_2")
GoogleSheetsBaseTest.get_stream_data(http_mocker, f"{base_file_name}_{_B_STREAM_NAME}_range_2", stream_name=_B_STREAM_NAME)
unavailable_stream = "unavailable_stream"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema({"properties": {"age": {"type": "string"}, "name": {"type": "string"}}})
)
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_B_STREAM_NAME)
.with_json_schema({"properties": {"email": {"type": "string"}, "name": {"type": "string"}}})
)
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(unavailable_stream)
.with_json_schema({"properties": {"address": {"type": "string"}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
a_and_b_stream_records_count = 5
assert len(output.records) == a_and_b_stream_records_count
catalog_available_streams = [_STREAM_NAME, _B_STREAM_NAME]
assert len(output.state_messages) == len(catalog_available_streams)
state_messages_streams = []
for state_message in output.state_messages:
state_messages_streams.append(state_message.state.stream.stream_descriptor.name)
assert _STREAM_NAME in state_messages_streams
assert _B_STREAM_NAME in state_messages_streams
expected_messages = []
for current_stream in catalog_available_streams:
for current_status in [AirbyteStreamStatus.COMPLETE, AirbyteStreamStatus.RUNNING, AirbyteStreamStatus.STARTED]:
stream_descriptor = StreamDescriptor(name=current_stream, namespace=None)
stream_status = AirbyteStreamStatusTraceMessage(status=current_status, stream_descriptor=stream_descriptor)
airbyte_trace_message = AirbyteTraceMessage(type=TraceType.STREAM_STATUS, emitted_at=ANY, stream_status=stream_status)
airbyte_message = AirbyteMessage(type=Type.TRACE, trace=airbyte_trace_message)
expected_messages.append(airbyte_message)
stream_descriptor = StreamDescriptor(name=unavailable_stream, namespace=None)
stream_status = AirbyteStreamStatusTraceMessage(status=AirbyteStreamStatus.INCOMPLETE, stream_descriptor=stream_descriptor)
airbyte_trace_message = AirbyteTraceMessage(type=TraceType.STREAM_STATUS, emitted_at=ANY, stream_status=stream_status)
airbyte_message_incomplete_stream = AirbyteMessage(type=Type.TRACE, trace=airbyte_trace_message)
expected_messages.append(airbyte_message_incomplete_stream)
assert len(output.trace_messages) == len(expected_messages)
for message in expected_messages:
assert message in output.trace_messages
@HttpMocker()
def test_when_read_then_status_and_state_messages_emitted(self, http_mocker: HttpMocker) -> None:
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta_2", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range_2", 200)
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions_2")
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema({"properties": {"header_1": {"type": ["null", "string"]}, "header_2": {"type": ["null", "string"]}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=False)
assert len(output.records) == 5
assert output.state_messages[0].state.stream.stream_state == AirbyteStateBlob(__ab_no_cursor_state_message=True)
assert output.state_messages[0].state.stream.stream_descriptor.name == _STREAM_NAME
assert output.trace_messages[0].trace.stream_status.status == AirbyteStreamStatus.STARTED
assert output.trace_messages[1].trace.stream_status.status == AirbyteStreamStatus.RUNNING
assert output.trace_messages[2].trace.stream_status.status == AirbyteStreamStatus.COMPLETE
@HttpMocker()
def test_read_empty_sheet(self, http_mocker: HttpMocker) -> None:
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range_empty", 200)
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema({"properties": {"header_1": {"type": ["null", "string"]}, "header_2": {"type": ["null", "string"]}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=True)
expected_message = (
f"Unable to read the schema of sheet. Error: Unexpected return result: Sheet was expected to contain data on exactly 1 sheet."
)
assert output.errors[0].trace.error.message == expected_message
@HttpMocker()
def test_read_expected_data_on_1_sheet(self, http_mocker: HttpMocker) -> None:
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta", 200)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range_with_unexpected_extra_sheet", 200)
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema({"properties": {"header_1": {"type": ["null", "string"]}, "header_2": {"type": ["null", "string"]}}})
)
.build()
)
output = self._read(self._config, catalog=configured_catalog, expecting_exception=True)
expected_message = (
f"Unable to read the schema of sheet. Error: Unexpected return result: Sheet was expected to contain data on exactly 1 sheet."
)
assert output.errors[0].trace.error.message == expected_message
def _make_read_with_spreadsheet(self, http_mocker: HttpMocker, spreadsheet_id_to_mock: str, spreadsheet_id_for_config: str):
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta", spreadsheet_id=spreadsheet_id_to_mock)
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range", spreadsheet_id=spreadsheet_id_to_mock)
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions", spreadsheet_id=spreadsheet_id_to_mock)
first_property = "header_1"
second_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(_STREAM_NAME)
.with_json_schema(
{"properties": {first_property: {"type": ["null", "string"]}, second_property: {"type": ["null", "string"]}}}
)
)
.build()
)
config_with_other_spreadsheet_format = deepcopy(self._config)
config_with_other_spreadsheet_format["spreadsheet_id"] = spreadsheet_id_for_config
output = self._read(config_with_other_spreadsheet_format, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_11", second_property: "value_12"}
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=_STREAM_NAME, data={first_property: "value_21", second_property: "value_22"}
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_spreadsheet_url_with_edit_and_gid_in_path(self, http_mocker: HttpMocker) -> None:
spreadsheet_id_to_mock = "18vWlVH8BfjGegwY_GdV1B_cPP9re66xI8uJK25dtY9Q"
spreadsheet_id_for_config = (
"https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1B_cPP9re66xI8uJK25dtY9Q/edit#gid=1820065035"
)
self._make_read_with_spreadsheet(
http_mocker=http_mocker, spreadsheet_id_to_mock=spreadsheet_id_to_mock, spreadsheet_id_for_config=spreadsheet_id_for_config
)
@HttpMocker()
def test_spreadsheet_url_with_edit_in_path(self, http_mocker: HttpMocker) -> None:
spreadsheet_id_to_mock = "18vWlVH8BfjGa-gwYGdV1BjcPP9re66xI8uJK25dtY9Q"
spreadsheet_id_for_config = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGa-gwYGdV1BjcPP9re66xI8uJK25dtY9Q/edit"
self._make_read_with_spreadsheet(
http_mocker=http_mocker, spreadsheet_id_to_mock=spreadsheet_id_to_mock, spreadsheet_id_for_config=spreadsheet_id_for_config
)
@HttpMocker()
def test_spreadsheet_path(self, http_mocker: HttpMocker) -> None:
spreadsheet_id_to_mock = "18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q"
spreadsheet_id_for_config = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q/"
self._make_read_with_spreadsheet(
http_mocker=http_mocker, spreadsheet_id_to_mock=spreadsheet_id_to_mock, spreadsheet_id_for_config=spreadsheet_id_for_config
)
@HttpMocker()
def test_spreadsheet_url_with_pound_in_path(self, http_mocker: HttpMocker) -> None:
spreadsheet_id_to_mock = "18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q"
spreadsheet_id_for_config = "https://docs.google.com/spreadsheets/d/18vWlVH8BfjGegwY_GdV1BjcPP9re_6xI8uJ-25dtY9Q/#"
self._make_read_with_spreadsheet(
http_mocker=http_mocker, spreadsheet_id_to_mock=spreadsheet_id_to_mock, spreadsheet_id_for_config=spreadsheet_id_for_config
)
@HttpMocker()
def test_spreadsheet_id(self, http_mocker: HttpMocker) -> None:
spreadsheet_id_to_mock = "18vWlVH8BfjGegwY_GdV1BjcPP9re66xI8uJK25dtY9Q"
spreadsheet_id_for_config = "18vWlVH8BfjGegwY_GdV1BjcPP9re66xI8uJK25dtY9Q"
self._make_read_with_spreadsheet(
http_mocker=http_mocker, spreadsheet_id_to_mock=spreadsheet_id_to_mock, spreadsheet_id_for_config=spreadsheet_id_for_config
)
@pytest.mark.skip("Pending to do")
def test_for_increase_batch_size_when_rate_limit(self):
pass
@HttpMocker()
def test_discover_with_stream_name_overrides(self, http_mocker: HttpMocker) -> None:
# Define original and overridden stream names
original_sheet_name = "a_stream_name" # Matches existing test data
overridden_stream_name = "custom_sales_data"
# Configure config with stream name override
config_with_overrides = deepcopy(self._config)
config_with_overrides["stream_name_overrides"] = [
{"source_stream_name": original_sheet_name, "custom_stream_name": overridden_stream_name}
]
# Set up HTTP mocks using existing templates
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "only_headers_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "only_headers_range")
# Define expected schema and catalog
expected_schema = {
"$schema": "https://json-schema.org/draft-07/schema#",
"additionalProperties": True,
"properties": {"header1": {"type": ["null", "string"]}, "header2": {"type": ["null", "string"]}},
"type": "object",
}
expected_catalog = AirbyteCatalog(
streams=[
AirbyteStream(
name=overridden_stream_name,
json_schema=expected_schema,
supported_sync_modes=[SyncMode.full_refresh],
default_cursor_field=None,
source_defined_primary_key=None,
is_resumable=False,
is_file_based=False,
)
]
)
expected_message = AirbyteMessage(type=Type.CATALOG, catalog=expected_catalog)
# Run discover and assert
output = self._discover(config_with_overrides, expecting_exception=False)
assert output.catalog == expected_message
@HttpMocker()
def test_discover_with_non_matching_stream_name_override(self, http_mocker: HttpMocker) -> None:
# Define a non-matching override
original_sheet_name = "a_stream_name"
non_matching_override = [{"source_stream_name": "NonExistingSheet", "custom_stream_name": "custom_name"}]
config_with_overrides = deepcopy(self._config)
config_with_overrides["stream_name_overrides"] = non_matching_override
# Set up HTTP mocks
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "only_headers_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "only_headers_range")
# Define expected schema and catalog with original name
expected_schema = {
"$schema": "https://json-schema.org/draft-07/schema#",
"additionalProperties": True,
"properties": {"header1": {"type": ["null", "string"]}, "header2": {"type": ["null", "string"]}},
"type": "object",
}
expected_catalog = AirbyteCatalog(
streams=[
AirbyteStream(
name=original_sheet_name,
json_schema=expected_schema,
supported_sync_modes=[SyncMode.full_refresh],
default_cursor_field=None,
source_defined_primary_key=None,
is_resumable=False,
is_file_based=False,
)
]
)
expected_message = AirbyteMessage(type=Type.CATALOG, catalog=expected_catalog)
# Run discover and assert
output = self._discover(config_with_overrides, expecting_exception=False)
assert output.catalog == expected_message
@HttpMocker()
def test_read_with_stream_name_overrides(self, http_mocker: HttpMocker) -> None:
# Define original and overridden stream names
original_sheet_name = "a_stream_name"
overridden_stream_name = "custom_sales_data"
# Configure config with stream name override
config_with_overrides = deepcopy(self._config)
config_with_overrides["stream_name_overrides"] = [
{"source_stream_name": original_sheet_name, "custom_stream_name": overridden_stream_name}
]
# Set up HTTP mocks
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range")
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions")
# Define configured catalog with overridden stream name
first_property = "header_1"
second_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(overridden_stream_name)
.with_json_schema(
{"properties": {first_property: {"type": ["null", "string"]}, second_property: {"type": ["null", "string"]}}}
)
)
.build()
)
# Run read and assert
output = self._read(config_with_overrides, catalog=configured_catalog, expecting_exception=False)
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=overridden_stream_name, data={first_property: "value_11", second_property: "value_12"}
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=overridden_stream_name, data={first_property: "value_21", second_property: "value_22"}
),
),
]
assert len(output.records) == 2
assert output.records == expected_records
@HttpMocker()
def test_read_with_stream_name_overrides_and_primary_key(self, http_mocker: HttpMocker) -> None:
# Define original and overridden stream names
original_sheet_name = "a_stream_name"
overridden_stream_name = "custom_sales_data"
# Configure config with stream name override
config_with_overrides = deepcopy(self._config)
config_with_overrides["stream_name_overrides"] = [
{"source_stream_name": original_sheet_name, "custom_stream_name": overridden_stream_name}
]
# Set up HTTP mocks for Google Sheets API responses
GoogleSheetsBaseTest.get_spreadsheet_info_and_sheets(http_mocker, "read_records_meta")
GoogleSheetsBaseTest.get_sheet_first_row(http_mocker, "read_records_range")
GoogleSheetsBaseTest.get_stream_data(http_mocker, "read_records_range_with_dimensions")
# Define the schema fields and configured catalog with a primary key
first_property = "header_1"
second_property = "header_2"
configured_catalog = (
CatalogBuilder()
.with_stream(
ConfiguredAirbyteStreamBuilder()
.with_name(overridden_stream_name)
.with_json_schema(
{"properties": {first_property: {"type": ["null", "string"]}, second_property: {"type": ["null", "string"]}}}
)
.with_primary_key([[first_property]]) # User sets "header_1" as the primary key
)
.build()
)
# Run the read operation
output = self._read(config_with_overrides, catalog=configured_catalog, expecting_exception=False)
# Define expected records
expected_records = [
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=overridden_stream_name, data={first_property: "value_11", second_property: "value_12"}
),
),
AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(
emitted_at=ANY, stream=overridden_stream_name, data={first_property: "value_21", second_property: "value_22"}
),
),
]
# Assertions
assert len(output.records) == 2, "Expected 2 records to be emitted"
assert output.records == expected_records, "Emitted records should match expected records with overridden stream name"
assert len(output.state_messages) == 1, "Expected 1 state message"
assert (
output.state_messages[0].state.stream.stream_descriptor.name == overridden_stream_name
), "State message should use overridden stream name"
assert output.state_messages[0].state.stream.stream_state == AirbyteStateBlob(
__ab_no_cursor_state_message=True
), "State should indicate no cursor for full refresh"
| TestSourceRead |
python | getsentry__sentry | src/sentry/explore/endpoints/serializers.py | {
"start": 4577,
"end": 8786
} | class ____(serializers.Serializer):
name = serializers.CharField(
required=True, max_length=255, help_text="The user-defined saved query name."
)
projects = ListField(
child=serializers.IntegerField(),
required=False,
default=list,
help_text="The saved projects filter for this query.",
)
dataset = serializers.ChoiceField(
choices=ExploreSavedQueryDataset.as_text_choices(),
default=ExploreSavedQueryDataset.get_type_name(ExploreSavedQueryDataset.SPANS),
help_text="The dataset you would like to query. Supported values: `spans`, `logs`, `metrics`.",
)
start = serializers.DateTimeField(
required=False,
allow_null=True,
help_text="The saved start time for this saved query.",
)
end = serializers.DateTimeField(
required=False,
allow_null=True,
help_text="The saved end time for this saved query.",
)
range = serializers.CharField(
required=False,
allow_null=True,
help_text="The saved time range period for this saved query.",
)
environment = ListField(
child=serializers.CharField(),
required=False,
allow_null=True,
help_text="The name of environments to filter by.",
)
interval = serializers.CharField(
required=False, allow_null=True, help_text="Resolution of the time series."
)
query = ListField(child=QuerySerializer(), required=False, allow_null=True)
def validate_projects(self, projects):
from sentry.api.validators import validate_project_ids
return validate_project_ids(projects, self.context["params"]["project_id"])
# Avoid including any side-effecting logic here, since this logic is also used when generating prebuilt queries on first read
def validate(self, data):
query = {}
query_keys = [
"environment",
"range",
"start",
"end",
"interval",
]
inner_query_keys = [
"query",
"fields",
"orderby",
"groupby",
"visualize",
"mode",
"aggregateField",
"aggregateOrderby",
"metric",
]
for key in query_keys:
if data.get(key) is not None:
value = data[key]
if key in ("start", "end"):
value = value.isoformat()
query[key] = value
if "query" in data:
query["query"] = []
for q in data["query"]:
if "metric" in q and data["dataset"] != "metrics":
raise serializers.ValidationError(
"Metric field is only allowed for metrics dataset"
)
if data["dataset"] == "metrics" and "metric" not in q:
raise serializers.ValidationError(
"Metric field is required for metrics dataset"
)
inner_query = {}
for key in inner_query_keys:
if key in q:
inner_query[key] = q[key]
query["query"].append(inner_query)
if data["projects"] == ALL_ACCESS_PROJECTS:
data["projects"] = []
query["all_projects"] = True
if "query" in query:
if "interval" in query:
interval = parse_stats_period(query["interval"])
if interval is None:
raise serializers.ValidationError("Interval could not be parsed")
date_range = self.context["params"]["end"] - self.context["params"]["start"]
validate_interval(
interval,
serializers.ValidationError("Interval would cause too many results"),
date_range,
0,
)
return {
"name": data["name"],
"project_ids": data["projects"],
"query": query,
"dataset": ExploreSavedQueryDataset.get_id_for_type_name(data["dataset"]),
}
| ExploreSavedQuerySerializer |
python | doocs__leetcode | solution/2200-2299/2258.Escape the Spreading Fire/Solution.py | {
"start": 0,
"end": 2239
} | class ____:
def maximumMinutes(self, grid: List[List[int]]) -> int:
def spread(q: Deque[int]) -> Deque[int]:
nq = deque()
while q:
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and not fire[x][y] and grid[x][y] == 0:
fire[x][y] = True
nq.append((x, y))
return nq
def check(t: int) -> bool:
for i in range(m):
for j in range(n):
fire[i][j] = False
q1 = deque()
for i, row in enumerate(grid):
for j, x in enumerate(row):
if x == 1:
fire[i][j] = True
q1.append((i, j))
while t and q1:
q1 = spread(q1)
t -= 1
if fire[0][0]:
return False
q2 = deque([(0, 0)])
vis = [[False] * n for _ in range(m)]
vis[0][0] = True
while q2:
for _ in range(len(q2)):
i, j = q2.popleft()
if fire[i][j]:
continue
for a, b in pairwise(dirs):
x, y = i + a, j + b
if (
0 <= x < m
and 0 <= y < n
and not vis[x][y]
and not fire[x][y]
and grid[x][y] == 0
):
if x == m - 1 and y == n - 1:
return True
vis[x][y] = True
q2.append((x, y))
q1 = spread(q1)
return False
m, n = len(grid), len(grid[0])
l, r = -1, m * n
dirs = (-1, 0, 1, 0, -1)
fire = [[False] * n for _ in range(m)]
while l < r:
mid = (l + r + 1) >> 1
if check(mid):
l = mid
else:
r = mid - 1
return int(1e9) if l == m * n else l
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/writeonly.py | {
"start": 16352,
"end": 18443
} | class ____(Generic[_T]):
"""Virtual collection which includes append/remove methods that synchronize
into the attribute event system.
"""
if not TYPE_CHECKING:
__slots__ = ()
instance: _T
_from_obj: Tuple[FromClause, ...]
def __init__(
self, attr: _WriteOnlyAttributeImpl, state: InstanceState[_T]
):
instance = state.obj()
if TYPE_CHECKING:
assert instance
self.instance = instance
self.attr = attr
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
if prop.secondary is not None:
# this is a hack right now. The Query only knows how to
# make subsequent joins() without a given left-hand side
# from self._from_obj[0]. We need to ensure prop.secondary
# is in the FROM. So we purposely put the mapper selectable
# in _from_obj[0] to ensure a user-defined join() later on
# doesn't fail, and secondary is then in _from_obj[1].
# note also, we are using the official ORM-annotated selectable
# from __clause_element__(), see #7868
self._from_obj = (prop.mapper.__clause_element__(), prop.secondary)
else:
self._from_obj = ()
self._where_criteria = (
prop._with_parent(instance, alias_secondary=False),
)
if self.attr.order_by:
self._order_by_clauses = self.attr.order_by
else:
self._order_by_clauses = ()
def _add_all_impl(self, iterator: Iterable[_T]) -> None:
for item in iterator:
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
def _remove_impl(self, item: _T) -> None:
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance),
item,
None,
)
| _AbstractCollectionWriter |
python | doocs__leetcode | solution/1100-1199/1165.Single-Row Keyboard/Solution.py | {
"start": 0,
"end": 251
} | class ____:
def calculateTime(self, keyboard: str, word: str) -> int:
pos = {c: i for i, c in enumerate(keyboard)}
ans = i = 0
for c in word:
ans += abs(pos[c] - i)
i = pos[c]
return ans
| Solution |
python | ray-project__ray | python/ray/util/annotations.py | {
"start": 122,
"end": 2925
} | class ____(Enum):
PUBLIC_API = "PublicAPI"
DEVELOPER_API = "DeveloperAPI"
DEPRECATED = "Deprecated"
UNKNOWN = "Unknown"
def PublicAPI(*args, **kwargs):
"""Annotation for documenting public APIs.
Public APIs are classes and methods exposed to end users of Ray.
If ``stability="alpha"``, the API can be used by advanced users who are
tolerant to and expect breaking changes.
If ``stability="beta"``, the API is still public and can be used by early
users, but are subject to change.
If ``stability="stable"``, the APIs will remain backwards compatible across
minor Ray releases (e.g., Ray 1.4 -> 1.8).
For a full definition of the stability levels, please refer to the
:ref:`Ray API Stability definitions <api-stability>`.
Args:
stability: One of {"stable", "beta", "alpha"}.
api_group: Optional. Used only for doc rendering purpose. APIs in the same group
will be grouped together in the API doc pages.
Examples:
>>> from ray.util.annotations import PublicAPI
>>> @PublicAPI
... def func(x):
... return x
>>> @PublicAPI(stability="beta")
... def func(y):
... return y
"""
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return PublicAPI(stability="stable", api_group="Others")(args[0])
if "stability" in kwargs:
stability = kwargs["stability"]
assert stability in ["stable", "beta", "alpha"], stability
else:
stability = "stable"
api_group = kwargs.get("api_group", "Others")
def wrap(obj):
if stability in ["alpha", "beta"]:
message = (
f"**PublicAPI ({stability}):** This API is in {stability} "
"and may change before becoming stable."
)
_append_doc(obj, message=message)
_mark_annotated(obj, type=AnnotationType.PUBLIC_API, api_group=api_group)
return obj
return wrap
def DeveloperAPI(*args, **kwargs):
"""Annotation for documenting developer APIs.
Developer APIs are lower-level methods explicitly exposed to advanced Ray
users and library developers. Their interfaces may change across minor
Ray releases.
Examples:
>>> from ray.util.annotations import DeveloperAPI
>>> @DeveloperAPI
... def func(x):
... return x
"""
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return DeveloperAPI()(args[0])
def wrap(obj):
_append_doc(
obj,
message="**DeveloperAPI:** This API may change across minor Ray releases.",
)
_mark_annotated(obj, type=AnnotationType.DEVELOPER_API)
return obj
return wrap
| AnnotationType |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployments.py | {
"start": 1412,
"end": 41882
} | class ____:
async def test_create_oldstyle_deployment(
self,
session,
hosted_api_client,
flow,
flow_function,
storage_document_id,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
storage_document_id=storage_document_id,
).model_dump(mode="json")
response = await hosted_api_client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == "My Deployment"
assert response.json()["version"] == "mint"
assert response.json()["storage_document_id"] == str(storage_document_id)
deployment_id = response.json()["id"]
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_id
)
assert str(deployment.id) == deployment_id
assert deployment.name == "My Deployment"
assert deployment.tags == ["foo"]
assert deployment.flow_id == flow.id
assert deployment.parameters == {"foo": "bar"}
assert deployment.storage_document_id == storage_document_id
async def test_create_deployment(
self,
session,
hosted_api_client,
flow,
flow_function,
storage_document_id,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
labels={"env": "dev"},
parameters={"foo": "bar"},
job_variables={"cpu": 24},
storage_document_id=storage_document_id,
).model_dump(mode="json")
response = await hosted_api_client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
deployment_response = DeploymentResponse(**response.json())
assert deployment_response.name == "My Deployment"
assert deployment_response.version == "mint"
assert deployment_response.path == "/"
assert deployment_response.entrypoint == "/file.py:flow"
assert deployment_response.storage_document_id == storage_document_id
assert deployment_response.job_variables == {"cpu": 24}
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_response.id
)
assert deployment.id == deployment_response.id
assert deployment.name == "My Deployment"
assert deployment.tags == ["foo"]
assert deployment.flow_id == flow.id
assert deployment.parameters == {"foo": "bar"}
assert deployment.storage_document_id == storage_document_id
assert deployment.job_variables == {"cpu": 24}
async def test_create_deployment_with_single_schedule(
self,
session,
client,
flow,
):
schedule = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1)
)
data = DeploymentCreate( # type: ignore
name="My Deployment",
version="mint",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
schedules=[schemas.actions.DeploymentScheduleCreate(schedule=schedule)],
).model_dump(mode="json")
response = await client.post(
"/deployments/",
json=data,
)
data = response.json()
deployment_id = data["id"]
assert response.status_code == 201
assert data["name"] == "My Deployment"
assert len(data["schedules"]) == 1
assert (
schemas.core.DeploymentSchedule(**data["schedules"][0]).schedule == schedule
)
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_id,
)
assert len(schedules) == 1
assert schedules[0] == schemas.core.DeploymentSchedule(**data["schedules"][0])
async def test_create_deployment_with_multiple_schedules(
self,
client,
flow,
):
schedule1 = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1)
)
schedule2 = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=2)
)
data = DeploymentCreate( # type: ignore
name="My Deployment",
version="mint",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=schedule1,
active=True,
),
schemas.actions.DeploymentScheduleCreate(
schedule=schedule2,
active=False,
),
],
).model_dump(mode="json")
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 201
deployment_id = response.json()["id"]
data = response.json()
schedules = [schemas.core.DeploymentSchedule(**s) for s in data["schedules"]]
assert len(schedules) == 2
assert schedules == [
schemas.core.DeploymentSchedule(
schedule=schedule2,
active=False,
deployment_id=deployment_id,
),
schemas.core.DeploymentSchedule(
schedule=schedule1,
active=True,
deployment_id=deployment_id,
),
]
async def test_create_deployment_with_multiple_schedules_populates_legacy_schedule(
self,
session,
client,
flow,
):
schedule1 = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1)
)
schedule2 = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=2)
)
data = DeploymentCreate( # type: ignore
name="My Deployment",
version="mint",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=schedule1,
active=True,
),
schemas.actions.DeploymentScheduleCreate(
schedule=schedule2,
active=True,
),
],
).model_dump(mode="json")
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 201
deployment_id = response.json()["id"]
# Just to make sure this test is deterministic, let's update one of the
# schedules so that it's updated datetime is after the other schedule.
first_schedule = schemas.core.DeploymentSchedule(
**response.json()["schedules"][0]
)
await models.deployments.update_deployment_schedule(
session=session,
deployment_id=deployment_id,
deployment_schedule_id=first_schedule.id,
schedule=schemas.actions.DeploymentScheduleUpdate(active=False),
)
await session.commit()
# Then we'll read the deployment again and ensure that the schedules
# are returned in the correct order.
response = await client.get(f"/deployments/{deployment_id}")
assert response.status_code == 200
data = response.json()
schedules = [schemas.core.DeploymentSchedule(**s) for s in data["schedules"]]
assert data["name"] == "My Deployment"
assert len(schedules) == 2
assert schedules[0].id == first_schedule.id
async def test_default_work_queue_name_is_none(self, session, client, flow):
data = DeploymentCreate(name="My Deployment", flow_id=flow.id).model_dump(
mode="json"
)
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["work_queue_name"] is None
async def test_create_deployment_respects_flow_id_name_uniqueness(
self,
session,
hosted_api_client,
flow,
storage_document_id,
):
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
paused=True,
storage_document_id=storage_document_id,
).model_dump(mode="json")
response = await hosted_api_client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == "My Deployment"
deployment_id = response.json()["id"]
# post the same data
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
paused=True,
storage_document_id=storage_document_id,
).model_dump(mode="json")
response = await hosted_api_client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["name"] == "My Deployment"
assert response.json()["id"] == deployment_id
assert response.json()["paused"]
assert response.json()["storage_document_id"] == str(storage_document_id)
# post different data, upsert should be respected
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
paused=False, # CHANGED
storage_document_id=storage_document_id,
).model_dump(mode="json")
response = await hosted_api_client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["name"] == "My Deployment"
assert response.json()["id"] == deployment_id
assert not response.json()["paused"]
async def test_create_deployment_populates_and_returned_created(
self,
client,
flow,
):
current_time = now_fn("UTC")
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == 201
assert response.json()["name"] == "My Deployment"
assert parse_datetime(response.json()["created"]) >= current_time
assert parse_datetime(response.json()["updated"]) >= current_time
async def test_creating_deployment_with_inactive_schedule_creates_no_runs(
self, session, client, flow
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
"/deployments/",
json=DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1),
anchor_date=datetime.datetime(
2020, 1, 1, tzinfo=datetime.timezone.utc
),
),
active=False,
)
],
).model_dump(mode="json"),
)
n_runs = await models.flow_runs.count_flow_runs(
session, flow_filter=schemas.filters.FlowFilter(id=dict(any_=[flow.id]))
)
assert n_runs == 0
async def test_creating_deployment_with_no_schedule_creates_no_runs(
self, session, client, flow
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
"/deployments/",
json=DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
paused=False,
).model_dump(mode="json"),
)
n_runs = await models.flow_runs.count_flow_runs(
session, flow_filter=schemas.filters.FlowFilter(id=dict(any_=[flow.id]))
)
assert n_runs == 0
async def test_creating_deployment_with_global_concurrency_limit_id(
self, session, client, flow
):
# Create a global concurrency limit
concurrency_limit = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name="test-limit",
limit=5,
),
)
await session.commit()
# Create deployment with global concurrency limit
response = await client.post(
"/deployments/",
json=DeploymentCreate(
name="My Deployment1",
flow_id=flow.id,
global_concurrency_limit_id=concurrency_limit.id, # Changed from global_concurrency_limit_id
).model_dump(mode="json", exclude_unset=True),
)
assert response.status_code == 201
deployment_data = response.json()
assert deployment_data["global_concurrency_limit"]["id"] == str(
concurrency_limit.id
)
async def test_creating_deployment_with_both_concurrency_limits_fails(
self, session, client, flow
):
# Create a global concurrency limit
concurrency_limit = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name="test-limit",
limit=5,
),
)
await session.commit()
# Attempt to create deployment with both limits
response = await client.post(
"/deployments/",
json={
"name": "My Deployment",
"flow_id": str(flow.id),
"global_concurrency_limit_id": str(
concurrency_limit.id
), # Changed from global_concurrency_limit_id
"concurrency_limit": 2,
},
)
assert response.status_code == 422
assert (
"Value error, A deployment cannot have both a concurrency limit and a global concurrency limit."
in response.json()["exception_detail"][0]["msg"]
)
async def test_upserting_deployment_with_inactive_schedule_deletes_existing_auto_scheduled_runs(
self, client, deployment, session
):
# schedule runs
await models.deployments.schedule_runs(
session=session, deployment_id=deployment.id
)
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS.value()
# create a run manually to ensure it isn't deleted
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
state=schemas.states.Scheduled(
scheduled_time=now_fn("UTC") + datetime.timedelta(days=1)
),
),
)
await session.commit()
# upsert the deployment to be paused and have no schedules
await client.post(
"/deployments/",
json=schemas.actions.DeploymentCreate(
name=deployment.name,
flow_id=deployment.flow_id,
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=deployment.schedules[0].schedule, active=False
)
],
paused=True,
).model_dump(mode="json"),
)
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 1
async def test_upserting_deployment_with_new_schedule_deletes_existing_auto_scheduled_runs(
self,
client,
deployment,
session,
db,
):
# schedule runs
await models.deployments.schedule_runs(
session=session, deployment_id=deployment.id
)
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS.value()
# create a run manually to ensure it isn't deleted
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
state=schemas.states.Scheduled(
scheduled_time=now_fn("UTC") + datetime.timedelta(seconds=2)
),
),
)
await session.commit()
# upsert the deployment a new schedule active
await client.post(
"/deployments/",
json=schemas.actions.DeploymentCreate(
name=deployment.name,
flow_id=deployment.flow_id,
schedules=[
schemas.actions.DeploymentScheduleCreate(
active=True,
schedule=schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(seconds=1),
anchor_date=datetime.datetime(
2020, 1, 1, tzinfo=datetime.timezone.utc
),
),
)
],
paused=False,
).model_dump(mode="json"),
)
# auto-scheduled runs should be deleted
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 1
# check that the maximum run is from the secondly schedule
query = sa.select(sa.func.max(db.FlowRun.expected_start_time))
result = await session.execute(query)
assert result.scalar() < now_fn("UTC") + datetime.timedelta(seconds=100)
async def test_create_deployment_throws_useful_error_on_missing_blocks(
self,
client,
flow,
storage_document_id,
):
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
storage_document_id=uuid4(),
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_409_CONFLICT
assert (
"Error creating deployment. Could not find storage block with id"
in response.json()["detail"]
), "Error message identifies storage block could not be found."
async def test_create_deployment_with_pool_and_queue(
self,
client,
flow,
session,
work_pool,
work_queue_1,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables={"cpu": 24},
work_pool_name=work_pool.name,
work_queue_name=work_queue_1.name,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
deployment_response = DeploymentResponse(**response.json())
assert deployment_response.name == "My Deployment"
assert deployment_response.version == "mint"
assert deployment_response.path == "/"
assert deployment_response.entrypoint == "/file.py:flow"
assert deployment_response.job_variables == {"cpu": 24}
assert deployment_response.work_pool_name == work_pool.name
assert deployment_response.work_queue_name == work_queue_1.name
assert deployment_response.work_queue_id == work_queue_1.id
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_response.id
)
assert deployment.id == deployment_response.id
assert deployment.name == "My Deployment"
assert deployment.tags == ["foo"]
assert deployment.flow_id == flow.id
assert deployment.parameters == {"foo": "bar"}
assert deployment.work_queue_id == work_queue_1.id
async def test_create_deployment_with_only_work_pool(
self,
client,
flow,
session,
work_pool,
):
default_queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables={"cpu": 24},
work_pool_name=work_pool.name,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
deployment_response = DeploymentResponse(**response.json())
assert deployment_response.name == "My Deployment"
assert deployment_response.version == "mint"
assert deployment_response.path == "/"
assert deployment_response.entrypoint == "/file.py:flow"
assert deployment_response.job_variables == {"cpu": 24}
assert deployment_response.work_pool_name == work_pool.name
assert deployment_response.work_queue_name == default_queue.name
assert deployment_response.work_queue_id == work_pool.default_queue_id
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_response.id
)
assert deployment.id == deployment_response.id
assert deployment.name == "My Deployment"
assert deployment.tags == ["foo"]
assert deployment.flow_id == flow.id
assert deployment.parameters == {"foo": "bar"}
assert deployment.work_queue_id == work_pool.default_queue_id
async def test_create_deployment_creates_work_queue(
self,
client,
flow,
session,
work_pool,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables={"cpu": 24},
work_pool_name=work_pool.name,
work_queue_name="new-queue",
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["work_pool_name"] == work_pool.name
assert response.json()["work_queue_name"] == "new-queue"
deployment_id = response.json()["id"]
work_queue = await models.workers.read_work_queue_by_name(
session=session, work_pool_name=work_pool.name, work_queue_name="new-queue"
)
assert work_queue is not None
# Regression test for #19415: work_queue_id should be in API response
assert response.json()["work_queue_id"] == str(work_queue.id)
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_id
)
assert deployment.work_queue_id == work_queue.id
@pytest.mark.parametrize(
"template, overrides",
[
( # test with no overrides
{
"job_configuration": {"thing_one": "{{ var1 }}"},
"variables": {
"properties": {
"var1": {
"type": "string",
}
},
"required": ["var1"],
},
},
{}, # no overrides
),
( # test with incomplete overrides
{
"job_configuration": {
"thing_one": "{{ var1 }}",
"thing_two": "{{ var2 }}",
},
"variables": {
"properties": {
"var1": {
"type": "string",
},
"var2": {
"type": "string",
},
},
"required": ["var1", "var2"],
},
},
{"var2": "hello"}, # wrong override
),
],
)
async def test_create_deployment_ignores_required_fields(
self,
client,
flow,
session,
template,
overrides,
):
"""
Test that creating a deployment does not require required fields to be overridden
as job variables. We don't know the full set of overrides until a flow run is
running because the flow run may have overridden required fields.
"""
work_pool = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(
name="Test Work Pool", base_job_template=template
),
)
await session.commit()
await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables=overrides,
work_pool_name=work_pool.name,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == 201
@pytest.mark.parametrize(
"template, overrides",
[
( # test with no overrides, no required
{
"job_configuration": {"thing_one": "{{ var1 }}"},
"variables": {
"properties": {"var1": {"type": "string", "default": "hello"}},
"required": [],
},
},
{}, # no overrides
),
( # test with override
{
"job_configuration": {
"thing_one": "{{ var1 }}",
},
"variables": {
"properties": {
"var1": {
"type": "string",
},
},
"required": ["var1"],
},
},
{"var1": "hello"}, # required override
),
( # test with override and multiple variables
{
"job_configuration": {
"thing_one": "{{ var1 }}",
"thing_two": "{{ var2 }}",
},
"variables": {
"properties": {
"var1": {
"type": "string",
},
"var2": {"type": "string", "default": "world"},
},
"required": ["var1"],
},
},
{"var1": "hello"}, # required override
),
],
)
async def test_create_deployment_with_job_variables_succeeds(
self,
client,
flow,
session,
template,
overrides,
):
work_pool = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(
name="Test Work Pool", base_job_template=template
),
)
await session.commit()
await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables=overrides,
work_pool_name=work_pool.name,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == 201
async def test_create_deployment_can_create_work_queue(
self,
client,
flow,
session,
work_pool,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables={"cpu": 24},
work_pool_name=work_pool.name,
work_queue_name="new-work-pool-queue",
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["work_queue_name"] == "new-work-pool-queue"
deployment_id = response.json()["id"]
deployment = await models.deployments.read_deployment(
session=session, deployment_id=deployment_id
)
work_queue = await models.workers.read_work_queue_by_name(
session=session,
work_pool_name=work_pool.name,
work_queue_name="new-work-pool-queue",
)
assert deployment.work_queue_id == work_queue.id
async def test_create_deployment_returns_404_for_non_existent_work_pool(
self,
client,
flow,
session,
work_pool,
):
data = DeploymentCreate(
name="My Deployment",
version="mint",
path="/",
entrypoint="/file.py:flow",
flow_id=flow.id,
tags=["foo"],
parameters={"foo": "bar"},
job_variables={"cpu": 24},
work_pool_name="imaginary-work-pool",
work_queue_name="default",
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.json()["detail"] == 'Work pool "imaginary-work-pool" not found.'
async def test_create_deployment_rejects_invalid_parameter_schemas(
self,
client,
flow,
work_pool,
):
data = dict(
name="My Deployment",
flow_id=str(flow.id),
work_pool_name=work_pool.name,
enforce_parameter_schema=True,
parameter_openapi_schema={
"type": "object",
"properties": {"foo": {"type": "blork"}},
},
parameters={"foo": 1},
)
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 422
assert "'blork' is not valid under any of the given schemas" in response.text
async def test_create_deployment_does_not_reject_invalid_parameter_schemas_by_default(
self,
client,
flow,
work_pool,
):
data = dict(
name="My Deployment",
flow_id=str(flow.id),
work_pool_name=work_pool.name,
parameter_openapi_schema={
"type": "object",
"properties": {"foo": {"type": "blork"}},
},
parameters={"foo": 1},
)
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 201
async def test_create_deployment_enforces_parameter_schema(
self,
client,
flow,
work_pool,
):
data = dict(
name="My Deployment",
flow_id=str(flow.id),
work_pool_name=work_pool.name,
enforce_parameter_schema=True,
parameter_openapi_schema={
"type": "object",
"properties": {"foo": {"type": "string"}},
},
parameters={"foo": 1},
)
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 422
assert (
"Validation failed for field 'foo'. Failure reason: 1 is not of type"
" 'string'" in response.text
)
async def test_create_deployment_enforces_schema_by_default(
self,
client,
flow,
work_pool,
):
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
work_pool_name=work_pool.name,
parameter_openapi_schema={
"type": "object",
"properties": {"foo": {"type": "string"}},
},
parameters={"foo": 1},
).model_dump(mode="json")
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 422
async def test_create_deployment_parameter_enforcement_allows_partial_parameters(
self,
client,
flow,
work_pool,
):
data = DeploymentCreate(
name="My Deployment",
flow_id=flow.id,
work_pool_name=work_pool.name,
enforce_parameter_schema=True,
parameter_openapi_schema={
"type": "object",
"required": ["person"],
"properties": {
"name": {
"type": "string",
"default": "world",
"position": 1,
},
"person": {
"allOf": [{"$ref": "#/definitions/Person"}],
"position": 0,
},
},
"definitions": {
"Person": {
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
"greeting": {
"type": "string",
"default": "Hello",
},
},
}
},
},
parameters={"person": {"greeting": "sup"}},
).model_dump(mode="json")
response = await client.post(
"/deployments/",
json=data,
)
assert response.status_code == 201
async def test_can_pause_deployment_by_upserting_paused(
self,
client,
deployment,
):
assert deployment.paused is False
data = DeploymentCreate( # type: ignore
name=deployment.name,
flow_id=deployment.flow_id,
paused=True,
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == 200
assert response.json()["paused"] is True
async def test_create_deployment_with_concurrency_limit(
self,
client: AsyncClient,
flow: Flow,
):
response = await client.post(
"/deployments/",
json=dict(
name="My Deployment",
flow_id=str(flow.id),
concurrency_limit=3,
),
)
assert response.status_code == status.HTTP_201_CREATED
json_response = response.json()
assert json_response["concurrency_limit"] is None, (
"Deprecated int-only field should be None for backwards-compatibility"
)
global_concurrency_limit = json_response.get("global_concurrency_limit")
assert global_concurrency_limit is not None
assert global_concurrency_limit.get("limit") == 3
assert global_concurrency_limit.get("active") is True
assert (
global_concurrency_limit.get("name") == f"deployment:{json_response['id']}"
)
async def test_create_deployment_retains_concurrency_limit_on_upsert_if_not_specified(
self,
client: AsyncClient,
flow: Flow,
):
"""Ensure that old prefect clients that don't know about concurrency limits can still use them server-side.
This means that if a deployment has a concurrency limit (possibly created through the Cloud UI), but the client
is an old version that doesn't know about concurrency limits, then when using `prefect deploy`, the old client
should not remove the concurrency limit from the existing deployment.
"""
# Create deployment with a concurrency limit
data = {
"name": "Deployment with concurrency limit",
"flow_id": str(flow.id),
"concurrency_limit": 3,
}
response = await client.post("/deployments/", json=data)
assert response.status_code == 201
global_concurrency_limit = response.json().get("global_concurrency_limit")
assert global_concurrency_limit is not None
assert global_concurrency_limit.get("limit") == 3
# Upsert the deployment without specifying a concurrency limit
updated_data = data.copy()
updated_data.pop("concurrency_limit", None)
updated_data["version"] = "1.0.1"
response = await client.post("/deployments/", json=updated_data)
# Ensure that the concurrency limit is still present
assert response.status_code == 200
updated_global_concurrency_limit = response.json().get(
"global_concurrency_limit"
)
assert updated_global_concurrency_limit is not None
assert updated_global_concurrency_limit.get("limit") == 3
async def test_upsert_deployment_can_remove_schedules(
self,
client: AsyncClient,
flow: Flow,
):
# Create deployment with a schedule
data = DeploymentCreate( # type: ignore
name="Deployment with schedules",
flow_id=flow.id,
schedules=[
schemas.actions.DeploymentScheduleCreate( # type: ignore [call-arg]
active=True,
schedule=schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(hours=1)
),
),
],
).model_dump(mode="json")
response = await client.post("/deployments/", json=data)
assert response.status_code == 201
schedules_in_response = response.json().get("schedules")
assert schedules_in_response
# Upsert the deployment without schedules
updated_data = data.copy()
updated_data["schedules"] = []
updated_data["version"] = "1.0.1"
response = await client.post("/deployments/", json=updated_data)
# Ensure that the schedules are removed
assert response.status_code == 200
assert response.json().get("schedules") == []
| TestCreateDeployment |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 93174,
"end": 93250
} | class ____(Unaryop):
operation = operator.neg
_operator_repr = "-"
| Neg |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 12115,
"end": 15419
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Union[CLIPSegVisionConfig, CLIPSegTextConfig]):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
# CLIP text model uses both `causal_attention_mask` and `attention_mask`
# in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask`
if self.config._attn_implementation != "flash_attention_2":
if attention_mask is not None and causal_attention_mask is not None:
attention_mask = attention_mask + causal_attention_mask
elif causal_attention_mask is not None:
attention_mask = causal_attention_mask
else:
self.is_causal = causal_attention_mask is not None
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->CLIPSeg
| CLIPSegAttention |
python | getsentry__sentry | src/sentry/mail/analytics.py | {
"start": 173,
"end": 277
} | class ____(BaseNotificationSent):
pass
analytics.register(EmailNotificationSent)
| EmailNotificationSent |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 109814,
"end": 110944
} | class ____(Request):
"""
Convert company models to public
:param ids: Ids of the models to convert
:type ids: Sequence[str]
"""
_service = "models"
_action = "make_public"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the models to convert",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePublicRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| MakePublicRequest |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/base_asset_operator.py | {
"start": 1185,
"end": 13751
} | class ____(BaseOperator, ABC):
"""Interface for an operator which materializes dagster assets.
This operator needs to implement the following methods:
- get_dagster_session: Returns a requests session that can be used to make requests to the Dagster API.
This is where any additional authentication can be added.
- get_dagster_url: Returns the URL for the Dagster instance.
- filter_asset_nodes: Filters asset nodes (which are returned from Dagster's graphql API) to only include those
that should be triggered by the current task.
Optionally, these methods can be overridden as well:
- get_partition_key: Determines the partition key to use to trigger the dagster run. This method will only be
called if the underlying asset is partitioned.
"""
def __init__(
self,
dagster_run_status_poll_interval: int = DEFAULT_DAGSTER_RUN_STATUS_POLL_INTERVAL,
**kwargs,
):
super().__init__(**kwargs)
self.dagster_run_status_poll_interval = dagster_run_status_poll_interval
@abstractmethod
def get_dagster_session(self, context: Context) -> requests.Session:
"""Returns a requests session that can be used to make requests to the Dagster API."""
def _get_validated_session(self, context: Context) -> requests.Session:
session = self.get_dagster_session(context)
dagster_url = self.get_dagster_url(context)
response = session.post(
# Timeout in seconds
f"{dagster_url}/graphql",
json={"query": VERIFICATION_QUERY},
timeout=3,
)
if response.status_code != 200:
raise Exception(
f"Failed to connect to Dagster at {dagster_url}. Response: {response.text}"
)
return session
@abstractmethod
def get_dagster_url(self, context: Context) -> str:
"""Returns the URL for the Dagster instance."""
@abstractmethod
def filter_asset_nodes(
self, context: Context, asset_nodes: Sequence[Mapping[str, Any]]
) -> Iterable[Mapping[str, Any]]:
"""Filters the asset nodes to only include those that should be triggered by the current task."""
def get_partition_key(
self, context: Context, partitioning_info: PartitioningInformation
) -> str:
"""Overrideable method to determine the partition key to use to trigger the dagster run.
This method will only be called if the underlying asset is partitioned.
"""
if not partitioning_info:
return None
return translate_logical_date_to_partition_key(
self.get_airflow_logical_date(context), partitioning_info
)
def get_valid_graphql_response(self, response: Response, key: str) -> Any:
response_json = response.json()
if not response_json.get("data"):
raise Exception(f"Error in GraphQL request. No data key: {response_json}")
if key not in response_json["data"]:
raise Exception(f"Error in GraphQL request. No {key} key: {response_json}")
return response_json["data"][key]
def get_all_asset_nodes(
self, session: requests.Session, dagster_url: str, context: Context
) -> Sequence[Mapping[str, Any]]:
# create graphql client
response = session.post(
# Timeout in seconds
f"{dagster_url}/graphql",
json={"query": ASSET_NODES_QUERY},
timeout=3,
)
return self.get_valid_graphql_response(response, "assetNodes")
def launch_dagster_run(
self,
context: Context,
session: requests.Session,
dagster_url: str,
execution_params: Mapping[str, Any],
) -> str:
response = session.post(
f"{dagster_url}/graphql",
json={
"query": TRIGGER_ASSETS_MUTATION,
"variables": {"executionParams": execution_params},
},
# Timeout in seconds
timeout=10,
)
launch_data = self.get_valid_graphql_response(response, "launchPipelineExecution")
return launch_data["run"]["id"]
def get_dagster_run_obj(
self, session: requests.Session, dagster_url: str, run_id: str
) -> Mapping[str, Any]:
response = session.post(
f"{dagster_url}/graphql",
json={"query": RUNS_QUERY, "variables": {"runId": run_id}},
# Timeout in seconds
timeout=3,
)
return self.get_valid_graphql_response(response, "runOrError")
def get_attribute_from_airflow_context(self, context: Context, attribute: str) -> Any:
if attribute not in context or context[attribute] is None:
raise Exception(f"Attribute {attribute} not found in context.")
return context[attribute]
def get_airflow_dag_run_id(self, context: Context) -> str:
return self.get_attribute_from_airflow_context(context, "dag_run").run_id
def get_airflow_dag_id(self, context: Context) -> str:
return self.get_attribute_from_airflow_context(context, "dag_run").dag_id
def get_airflow_task_id(self, context: Context) -> str:
return self.get_attribute_from_airflow_context(context, "task").task_id
def get_airflow_logical_date(self, context: Context) -> datetime:
return self.get_attribute_from_airflow_context(context, "logical_date")
def default_dagster_run_tags(self, context: Context) -> dict[str, str]:
return {
DAG_ID_TAG_KEY: self.get_airflow_dag_id(context),
DAG_RUN_ID_TAG_KEY: self.get_airflow_dag_run_id(context),
TASK_ID_TAG_KEY: self.get_airflow_task_id(context),
}
def launch_runs_for_task(self, context: Context, dag_id: str, task_id: str) -> None:
"""Launches runs for the given task in Dagster."""
session = self._get_validated_session(context)
dagster_url = self.get_dagster_url(context)
asset_nodes_data = self.get_all_asset_nodes(session, dagster_url, context)
logger.info(f"Got response {asset_nodes_data}")
filtered_asset_nodes = [
asset_node
for asset_node in self.filter_asset_nodes(context, asset_nodes_data)
if _is_asset_node_executable(asset_node)
]
if not filtered_asset_nodes:
raise Exception(f"No asset nodes found to trigger for task {dag_id}.{task_id}")
if (
not len(
{_get_implicit_job_identifier(asset_node) for asset_node in filtered_asset_nodes}
)
== 1
):
raise Exception(
"Could not find an implicit asset job that can trigger all assets in this task. "
"This may mean that you need to upgrade your Dagster version (1.8 or later), which allows all assets to be materialized in a single run, "
"or that the assets are not in the same code location. "
"`dagster-airlift` expects that all assets mapped to a given task exist within the same code location, so that they can be executed by the same run."
)
job_identifier = _get_implicit_job_identifier(next(iter(filtered_asset_nodes)))
asset_key_paths = [asset_node["assetKey"]["path"] for asset_node in filtered_asset_nodes]
logger.info(f"Triggering run for {job_identifier} with assets {asset_key_paths}")
tags = self.default_dagster_run_tags(context)
partitioning_info = PartitioningInformation.from_asset_node_graphql(filtered_asset_nodes)
if partitioning_info:
tags[PARTITION_NAME_TAG] = self.get_partition_key(context, partitioning_info)
logger.info(f"Using tags {tags}")
run_id = self.launch_dagster_run(
context,
session,
dagster_url,
build_dagster_run_execution_params(
tags,
job_identifier,
asset_key_paths=asset_key_paths,
),
)
logger.info("Waiting for dagster run completion...")
self.wait_for_run_and_retries_to_complete(
session=session, dagster_url=dagster_url, run_id=run_id
)
logger.info("All runs completed successfully.")
return None
def wait_for_run_to_complete(
self, session: requests.Session, dagster_url: str, run_id: str
) -> DagsterRunResult:
while True:
response = self.get_dagster_run_obj(session, dagster_url, run_id)
status = response["status"]
if status in ["SUCCESS", "FAILURE", "CANCELED"]:
break
time.sleep(self.dagster_run_status_poll_interval)
tags = {tag["key"]: tag["value"] for tag in response["tags"]}
return DagsterRunResult(status=response["status"], tags=tags)
def wait_for_run_and_retries_to_complete(
self, session: requests.Session, dagster_url: str, run_id: str
) -> None:
run_id_to_check = run_id
while True:
result = self.wait_for_run_to_complete(
session=session, dagster_url=dagster_url, run_id=run_id_to_check
)
if result.succeeded:
break
logger.info(f"Run {run_id_to_check} completed with status '{result.status}'.")
if result.run_will_automatically_retry and result.retried_run_id:
logger.info(
f"Run {run_id_to_check} retried in run {result.retried_run_id}. Waiting for completion..."
)
run_id_to_check = result.retried_run_id
continue
elif result.run_will_automatically_retry:
logger.info(
f"Run {run_id_to_check} failed, but is configured to automatically retry. Waiting for retried run to be created..."
)
continue
else:
raise Exception(f"Run {run_id_to_check} failed, and is not expected to retry.")
return None
def execute(self, context: Context) -> Any:
# https://github.com/apache/airflow/discussions/24463
os.environ["NO_PROXY"] = "*"
dag_id = os.environ["AIRFLOW_CTX_DAG_ID"]
task_id = os.environ["AIRFLOW_CTX_TASK_ID"]
return self.launch_runs_for_task(context, dag_id, task_id)
def _get_implicit_job_identifier(asset_node: Mapping[str, Any]) -> DagsterJobIdentifier:
"""Extracts the implicit job identifier from an asset node.
In dagster 1.8 and later, there is a single implicit asset job constructed across all assets.
Using this job to execute assets allows us to minimize the number of runs we need to launch,
and ensures that assets are executed in topological order.
"""
# In dagster 1.8 and later, there is a single implicit asset job constructed across all assets. Using this job to execute the asset is preferred, because
# it minimizes the number of runs we need to launch, and ensures that assets are executed
implicit_asset_job = next(
iter(
[job for job in asset_node["jobs"] if job["name"].startswith(IMPLICIT_ASSET_JOB_PREFIX)]
),
None,
)
job_to_use = implicit_asset_job or asset_node["jobs"][0]
location_name = job_to_use["repository"]["location"]["name"]
repository_name = job_to_use["repository"]["name"]
job_name = job_to_use["name"]
return (location_name, repository_name, job_name)
def build_dagster_run_execution_params(
tags: Mapping[str, Any],
job_identifier: DagsterJobIdentifier,
asset_key_paths: Sequence[Sequence[str]],
) -> dict[str, Any]:
location_name, repository_name, job_name = job_identifier
return {
"mode": "default",
"executionMetadata": {
"tags": [{"key": key, "value": value} for key, value in tags.items()]
},
"runConfigData": "{}",
"selector": {
"repositoryLocationName": location_name,
"repositoryName": repository_name,
"pipelineName": job_name,
"assetSelection": [{"path": asset_key} for asset_key in asset_key_paths],
"assetCheckSelection": [],
},
}
def _is_asset_node_executable(asset_node: Mapping[str, Any]) -> bool:
return bool(asset_node["jobs"])
def _build_runs_filter_param(tags: Mapping[str, Any]) -> Mapping[str, Any]:
return {"tags": [{"key": key, "value": value} for key, value in tags.items()]}
| BaseDagsterAssetsOperator |
python | sphinx-doc__sphinx | sphinx/transforms/__init__.py | {
"start": 11912,
"end": 12242
} | class ____(ContentsFilter):
"""Used with BuildEnvironment.add_toc_from() to discard cross-file links
within table-of-contents link nodes.
"""
visit_pending_xref = ContentsFilter.ignore_node_but_process_children
def visit_image(self, node: nodes.image) -> None:
raise nodes.SkipNode
| SphinxContentsFilter |
python | pyca__cryptography | tests/hazmat/primitives/test_dsa.py | {
"start": 26357,
"end": 28724
} | class ____:
def test_parameter_numbers_eq(self):
param = dsa.DSAParameterNumbers(1, 2, 3)
assert param == dsa.DSAParameterNumbers(1, 2, 3)
def test_parameter_numbers_ne(self):
param = dsa.DSAParameterNumbers(1, 2, 3)
assert param != dsa.DSAParameterNumbers(1, 2, 4)
assert param != dsa.DSAParameterNumbers(1, 1, 3)
assert param != dsa.DSAParameterNumbers(2, 2, 3)
assert param != object()
def test_public_numbers_eq(self):
pub = dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
assert pub == dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
def test_public_numbers_ne(self):
pub = dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
assert pub != dsa.DSAPublicNumbers(2, dsa.DSAParameterNumbers(1, 2, 3))
assert pub != dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(2, 2, 3))
assert pub != dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 3, 3))
assert pub != dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 4))
assert pub != object()
def test_private_numbers_eq(self):
pub = dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
priv = dsa.DSAPrivateNumbers(1, pub)
assert priv == dsa.DSAPrivateNumbers(
1, dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
)
def test_private_numbers_ne(self):
pub = dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
priv = dsa.DSAPrivateNumbers(1, pub)
assert priv != dsa.DSAPrivateNumbers(
2, dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 3))
)
assert priv != dsa.DSAPrivateNumbers(
1, dsa.DSAPublicNumbers(2, dsa.DSAParameterNumbers(1, 2, 3))
)
assert priv != dsa.DSAPrivateNumbers(
1, dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(2, 2, 3))
)
assert priv != dsa.DSAPrivateNumbers(
1, dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 3, 3))
)
assert priv != dsa.DSAPrivateNumbers(
1, dsa.DSAPublicNumbers(1, dsa.DSAParameterNumbers(1, 2, 4))
)
assert priv != object()
@pytest.mark.supported(
only_if=lambda backend: backend.dsa_supported(),
skip_message="Does not support DSA.",
)
| TestDSANumberEquality |
python | ray-project__ray | python/ray/tests/test_runtime_env_strong_type.py | {
"start": 190,
"end": 261
} | class ____:
nfield1: List[str]
nfield2: bool
@dataclass
| ValueType |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/messages/messages.py | {
"start": 2432,
"end": 54618
} | class ____(SyncAPIResource):
@cached_property
def batches(self) -> Batches:
return Batches(self._client)
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
@overload
def create(
self,
*,
max_tokens: int,
messages: Iterable[MessageParam],
model: ModelParam,
metadata: MetadataParam | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[False] | Omit = omit,
system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: ThinkingConfigParam | Omit = omit,
tool_choice: ToolChoiceParam | Omit = omit,
tools: Iterable[ToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message:
"""
Send a structured list of input messages with text and/or image content, and the
model will generate the next message in the conversation.
The Messages API can be used for either single queries or stateless multi-turn
conversations.
Learn more about the Messages API in our
[user guide](https://docs.claude.com/en/docs/initial-setup)
Args:
max_tokens: The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
messages: Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
model: The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
metadata: An object describing metadata about the request.
service_tier: Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
stop_sequences: Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
stream: Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
thinking: Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
tool_choice: How the model should use the provided tools. The model can use a specific tool,
any available tool, decide by itself, or not use tools at all.
tools: Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
top_p: Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def create(
    self,
    *,
    max_tokens: int,
    messages: Iterable[MessageParam],
    model: ModelParam,
    stream: Literal[True],
    metadata: MetadataParam | Omit = omit,
    service_tier: Literal["auto", "standard_only"] | Omit = omit,
    stop_sequences: SequenceNotStr[str] | Omit = omit,
    system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
    temperature: float | Omit = omit,
    thinking: ThinkingConfigParam | Omit = omit,
    tool_choice: ToolChoiceParam | Omit = omit,
    tools: Iterable[ToolUnionParam] | Omit = omit,
    top_k: int | Omit = omit,
    top_p: float | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[RawMessageStreamEvent]:
    """Typing overload: ``create`` with ``stream=True`` returns an event stream.

    Sends a structured list of input messages and streams the model's reply
    incrementally as server-sent events
    (see https://docs.claude.com/en/api/messages-streaming).

    Args:
        max_tokens: Absolute upper bound on generated tokens; the model may
            stop earlier. Per-model limits:
            https://docs.claude.com/en/docs/models-overview
        messages: Alternating ``user``/``assistant`` turns; each ``content``
            is a string or a list of typed content blocks. A trailing
            ``assistant`` message constrains the start of the reply. At most
            100,000 messages per request.
        model: Model identifier to use for the completion.
        stream: Must be ``True`` for this overload.
        metadata: Metadata describing the request.
        service_tier: ``"auto"`` to allow priority capacity, or
            ``"standard_only"``.
        stop_sequences: Custom strings that end generation with
            ``stop_reason="stop_sequence"``.
        system: System prompt providing context/instructions (there is no
            ``"system"`` role in ``messages``).
        temperature: Sampling randomness in [0.0, 1.0]; lower is more
            deterministic (never fully deterministic).
        thinking: Extended-thinking configuration; budget counts toward
            ``max_tokens`` (minimum 1,024).
        tool_choice: How the model may use the provided tools.
        tools: Client/server tool definitions (``name``, optional
            ``description``, ``input_schema``); the model may answer with
            ``tool_use`` blocks that you execute and return as
            ``tool_result`` blocks. See https://docs.claude.com/en/docs/tool-use
        top_k: Sample only from the top K options per token (advanced).
        top_p: Nucleus sampling cutoff; alter this or ``temperature``, not
            both (advanced).
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout, in seconds.
    """
    ...
@overload
def create(
    self,
    *,
    max_tokens: int,
    messages: Iterable[MessageParam],
    model: ModelParam,
    stream: bool,
    metadata: MetadataParam | Omit = omit,
    service_tier: Literal["auto", "standard_only"] | Omit = omit,
    stop_sequences: SequenceNotStr[str] | Omit = omit,
    system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
    temperature: float | Omit = omit,
    thinking: ThinkingConfigParam | Omit = omit,
    tool_choice: ToolChoiceParam | Omit = omit,
    tools: Iterable[ToolUnionParam] | Omit = omit,
    top_k: int | Omit = omit,
    top_p: float | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message | Stream[RawMessageStreamEvent]:
    """Typing overload: ``create`` with a runtime-determined ``stream`` flag.

    Returns a complete ``Message`` when ``stream`` is falsy, or a stream of
    raw server-sent events when it is truthy
    (see https://docs.claude.com/en/api/messages-streaming).

    Args:
        max_tokens: Absolute upper bound on generated tokens; the model may
            stop earlier. Per-model limits:
            https://docs.claude.com/en/docs/models-overview
        messages: Alternating ``user``/``assistant`` turns; each ``content``
            is a string or a list of typed content blocks. A trailing
            ``assistant`` message constrains the start of the reply. At most
            100,000 messages per request.
        model: Model identifier to use for the completion.
        stream: Whether to stream the response via server-sent events.
        metadata: Metadata describing the request.
        service_tier: ``"auto"`` to allow priority capacity, or
            ``"standard_only"``.
        stop_sequences: Custom strings that end generation with
            ``stop_reason="stop_sequence"``.
        system: System prompt providing context/instructions (there is no
            ``"system"`` role in ``messages``).
        temperature: Sampling randomness in [0.0, 1.0]; lower is more
            deterministic (never fully deterministic).
        thinking: Extended-thinking configuration; budget counts toward
            ``max_tokens`` (minimum 1,024).
        tool_choice: How the model may use the provided tools.
        tools: Client/server tool definitions (``name``, optional
            ``description``, ``input_schema``); the model may answer with
            ``tool_use`` blocks that you execute and return as
            ``tool_result`` blocks. See https://docs.claude.com/en/docs/tool-use
        top_k: Sample only from the top K options per token (advanced).
        top_p: Nucleus sampling cutoff; alter this or ``temperature``, not
            both (advanced).
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout, in seconds.
    """
    ...
@required_args(["max_tokens", "messages", "model"], ["max_tokens", "messages", "model", "stream"])
def create(
    self,
    *,
    max_tokens: int,
    messages: Iterable[MessageParam],
    model: ModelParam,
    metadata: MetadataParam | Omit = omit,
    service_tier: Literal["auto", "standard_only"] | Omit = omit,
    stop_sequences: SequenceNotStr[str] | Omit = omit,
    stream: Literal[False] | Literal[True] | Omit = omit,
    system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
    temperature: float | Omit = omit,
    thinking: ThinkingConfigParam | Omit = omit,
    tool_choice: ToolChoiceParam | Omit = omit,
    tools: Iterable[ToolUnionParam] | Omit = omit,
    top_k: int | Omit = omit,
    top_p: float | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message | Stream[RawMessageStreamEvent]:
    """Runtime implementation behind the ``create`` overloads: POST /v1/messages.

    Returns a complete ``Message`` when ``stream`` is falsy, otherwise a
    ``Stream[RawMessageStreamEvent]`` of server-sent events.
    """
    # Non-streaming requests with the client's default timeout get a timeout
    # scaled to max_tokens (and the model's non-streaming token cap, if known)
    # so long generations are not cut off prematurely.
    if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
        timeout = self._client._calculate_nonstreaming_timeout(
            max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
        )
    # Warn at the caller's call site (stacklevel=3 skips the dispatch frames).
    if model in DEPRECATED_MODELS:
        warnings.warn(
            f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
            DeprecationWarning,
            stacklevel=3,
        )
    return self._post(
        "/v1/messages",
        body=maybe_transform(
            {
                "max_tokens": max_tokens,
                "messages": messages,
                "model": model,
                "metadata": metadata,
                "service_tier": service_tier,
                "stop_sequences": stop_sequences,
                "stream": stream,
                "system": system,
                "temperature": temperature,
                "thinking": thinking,
                "tool_choice": tool_choice,
                "tools": tools,
                "top_k": top_k,
                "top_p": top_p,
            },
            # Streaming and non-streaming requests validate against different
            # param shapes.
            message_create_params.MessageCreateParamsStreaming
            if stream
            else message_create_params.MessageCreateParamsNonStreaming,
        ),
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=Message,
        # `stream` may be omitted/falsy; normalize to a plain bool for _post.
        stream=stream or False,
        stream_cls=Stream[RawMessageStreamEvent],
    )
def stream(
    self,
    *,
    max_tokens: int,
    messages: Iterable[MessageParam],
    model: ModelParam,
    metadata: MetadataParam | Omit = omit,
    container: Optional[str] | Omit = omit,
    service_tier: Literal["auto", "standard_only"] | Omit = omit,
    stop_sequences: SequenceNotStr[str] | Omit = omit,
    system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
    temperature: float | Omit = omit,
    top_k: int | Omit = omit,
    top_p: float | Omit = omit,
    thinking: ThinkingConfigParam | Omit = omit,
    tool_choice: ToolChoiceParam | Omit = omit,
    tools: Iterable[ToolUnionParam] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> MessageStreamManager:
    """Create a Message stream.

    Higher-level streaming helper: returns a ``MessageStreamManager`` context
    manager that issues the request (with ``stream=True`` forced) when
    entered, rather than sending it immediately.
    """
    # Warn at the caller's call site about deprecated models.
    if model in DEPRECATED_MODELS:
        warnings.warn(
            f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
            DeprecationWarning,
            stacklevel=3,
        )
    # Telemetry headers marking this as the streaming helper entry point;
    # caller-supplied headers are spread last so they take precedence.
    extra_headers = {
        "X-Stainless-Helper-Method": "stream",
        "X-Stainless-Stream-Helper": "messages",
        **(extra_headers or {}),
    }
    # Defer the HTTP call: the partial is invoked by MessageStreamManager
    # when the caller enters the context manager.
    make_request = partial(
        self._post,
        "/v1/messages",
        body=maybe_transform(
            {
                "max_tokens": max_tokens,
                "messages": messages,
                "model": model,
                "metadata": metadata,
                "container": container,
                "service_tier": service_tier,
                "stop_sequences": stop_sequences,
                "system": system,
                "temperature": temperature,
                "top_k": top_k,
                "top_p": top_p,
                "tools": tools,
                "thinking": thinking,
                "tool_choice": tool_choice,
                # Always stream in this helper.
                "stream": True,
            },
            message_create_params.MessageCreateParams,
        ),
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=Message,
        stream=True,
        stream_cls=Stream[RawMessageStreamEvent],
    )
    return MessageStreamManager(make_request)
def count_tokens(
    self,
    *,
    messages: Iterable[MessageParam],
    model: ModelParam,
    system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
    thinking: ThinkingConfigParam | Omit = omit,
    tool_choice: ToolChoiceParam | Omit = omit,
    tools: Iterable[MessageCountTokensToolParam] | Omit = omit,
    # Request-level extras; values given here take precedence over
    # client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageTokensCount:
    """Count the number of tokens in a Message without creating it.

    POST /v1/messages/count_tokens. Counts tokens across messages, tools,
    images, and documents. See
    https://docs.claude.com/en/docs/build-with-claude/token-counting

    Args:
        messages: Alternating ``user``/``assistant`` turns; each ``content``
            is a string or a list of typed content blocks. A trailing
            ``assistant`` message constrains the start of the reply. At most
            100,000 messages per request.
        model: Model identifier whose tokenization is used.
        system: System prompt providing context/instructions (there is no
            ``"system"`` role in ``messages``).
        thinking: Extended-thinking configuration (minimum budget 1,024
            tokens).
        tool_choice: How the model may use the provided tools.
        tools: Tool definitions (``name``, optional ``description``,
            ``input_schema``) to include in the count. See
            https://docs.claude.com/en/docs/tool-use
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout, in seconds.
    """
    request_body = maybe_transform(
        {
            "messages": messages,
            "model": model,
            "system": system,
            "thinking": thinking,
            "tool_choice": tool_choice,
            "tools": tools,
        },
        message_count_tokens_params.MessageCountTokensParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._post(
        "/v1/messages/count_tokens",
        body=request_body,
        options=request_options,
        cast_to=MessageTokensCount,
    )
| Messages |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 67115,
"end": 67298
class ____(ValidatorTestMixin, TestCase):
    # Runs the shared ValidatorTestMixin suite against the Draft 7 validator.
    Validator = validators.Draft7Validator
    # A (schema, instance) pair the validator must accept: the empty schema
    # matches everything.
    valid: tuple[dict, dict] = ({}, {})
    # A (schema, instance) pair that must fail: "foo" is not an integer.
    invalid = {"type": "integer"}, "foo"
| TestDraft7Validator |
python | walkccc__LeetCode | solutions/3558. Number of Ways to Assign Edge Weights I/3558.py | {
"start": 0,
"end": 538
} | class ____:
def assignEdgeWeights(self, edges: list[list[int]]) -> int:
MOD = 1_000_000_007
n = len(edges) + 1
graph = [[] for _ in range(n + 1)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
q = collections.deque([1])
seen = {1}
step = 0
while q:
for _ in range(len(q)):
u = q.popleft()
for v in graph[u]:
if v not in seen:
q.append(v)
seen.add(v)
step += 1
return pow(2, step - 2, MOD) if step > 0 else 0
| Solution |
python | great-expectations__great_expectations | tests/datasource/fluent/test_invalid_datasource.py | {
"start": 5546,
"end": 10356
} | class ____:
def test_connection_raises_informative_error(
self,
invalid_ds_cfg: dict,
invalid_datasource_factory: InvalidDSFactory,
):
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
print(invalid_ds)
with pytest.raises(TestConnectionError) as conn_err:
invalid_ds.test_connection()
print(f"{conn_err.value!r}\n >-- caused by -->\n{conn_err.value.__cause__!r}")
assert invalid_ds.config_error == conn_err.value.__cause__
def test_get_batch_list_raises_informative_error(
self,
invalid_ds_cfg: dict,
invalid_datasource_factory: Callable[
[dict[Literal["name", "type", "assets"] | Any, Any]], InvalidDatasource
],
):
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
with pytest.raises(TypeError) as err:
invalid_ds.get_batch({}) # type: ignore[arg-type] # expect error
assert invalid_ds.config_error == err.value.__cause__
def test_random_attribute_access_raises_informative_error(
self, invalid_ds_cfg: dict, invalid_datasource_factory: InvalidDSFactory
):
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
with pytest.raises(TypeError) as err:
_ = invalid_ds.random_attribute
assert invalid_ds.config_error == err.value.__cause__
@pytest.mark.parametrize("attr_name", ["name", "id", "type", "assets"])
def test_base_datasource_attribute_does_not_error(
self,
invalid_ds_cfg: dict,
invalid_datasource_factory: InvalidDSFactory,
attr_name: str,
):
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
attr_value = getattr(invalid_ds, attr_name)
print(attr_name, attr_value)
def test_get_asset_raises_warning(
self,
invalid_ds_cfg: dict,
invalid_datasource_factory: Callable[
[dict[Literal["name", "type", "assets"] | Any, Any]], InvalidDatasource
],
):
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
for asset in invalid_ds.assets:
with pytest.warns(GxInvalidDatasourceWarning):
invalid_asset = invalid_ds.get_asset(asset.name)
assert invalid_asset, "No asset was returned"
with pytest.raises(TestConnectionError):
invalid_asset.test_connection()
def test_extra_fields_are_ignored(
self,
datasource_fields: set[str],
data_asset_fields: set[str],
invalid_ds_cfg: dict,
invalid_datasource_factory: Callable[
[dict[Literal["name", "type", "assets"] | Any, Any]], InvalidDatasource
],
):
"""
Ensure that extra fields are ignored when creating the InvalidDatasource instance.
These fields could include secrets or other sensitive information that should not be included
in the InvalidDatasource instance.
Standard fields such as `type`, `name`, `id` etc. should be included in the InvalidDatasource instance and should
never be sensitive.
""" # noqa: E501 # FIXME CoP
print(f"Datasource config:\n{pf(invalid_ds_cfg)}")
invalid_ds = invalid_datasource_factory(invalid_ds_cfg)
ds_dict = invalid_ds._json_dict()
print(f"\nInvalidDatasource dict:\n{pf(ds_dict)}")
assert set(ds_dict.keys()) >= {
"name",
"type",
}, "Expected standard fields to be present"
extra_ds_fields = set(invalid_ds_cfg.keys()) - datasource_fields
for field in extra_ds_fields:
assert field not in ds_dict, f"Expected `{field}` to be ignored"
for asset in invalid_ds.assets:
asset_dict = asset.dict()
extra_asset_fields = set(asset_dict.keys()) - data_asset_fields
for field in extra_asset_fields:
assert field not in asset_dict, f"Expected asset `{field}` to be ignored"
@pytest.fixture
def rand_invalid_datasource_with_assets(
invalid_datasource_factory: InvalidDSFactory,
) -> InvalidDatasource:
random_ds_type = random.choice([t for t in DataSourceManager.type_lookup.type_names()])
invalid_ds = invalid_datasource_factory(
{
"name": "my invalid ds",
"type": random_ds_type,
"connection_string": "postgresql+psycopg2://postgres:@localhost/test_database",
"assets": [
{"name": "definitely_invalid", "type": "NOT_A_VALID_TYPE"},
{"name": "maybe_valid", "type": "table", "table_name": "my_table"},
{"name": "maybe_valid_2", "type": "csv", "sep": "|"},
{"name": "missing type"},
],
}
)
return invalid_ds
| TestInvalidDatasource |
python | openai__openai-python | examples/responses/streaming_tools.py | {
"start": 821,
"end": 1264
} | class ____(BaseModel):
table_name: Table
columns: List[Column]
conditions: List[Condition]
order_by: OrderBy
client = OpenAI()
with client.responses.stream(
model="gpt-4o-2024-08-06",
input="look up all my orders in november of last year that were fulfilled but not delivered on time",
tools=[
openai.pydantic_function_tool(Query),
],
) as stream:
for event in stream:
rich.print(event)
| Query |
python | lepture__authlib | tests/django/test_oauth1/models.py | {
"start": 275,
"end": 766
} | class ____(Model):
user = ForeignKey(User, on_delete=CASCADE)
client_id = CharField(max_length=48, unique=True, db_index=True)
client_secret = CharField(max_length=48, blank=True)
default_redirect_uri = TextField(blank=False, default="")
def get_default_redirect_uri(self):
return self.default_redirect_uri
def get_client_secret(self):
return self.client_secret
def get_rsa_public_key(self):
return read_file_path("rsa_public.pem")
| Client |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 571,
"end": 3801
} | class ____:
def it_can_find_the_first_of_its_children_named_in_a_sequence(self, first_fixture):
element, tagnames, matching_child = first_fixture
assert element.first_child_found_in(*tagnames) is matching_child
def it_can_insert_an_element_before_named_successors(self, insert_fixture):
element, child, tagnames, expected_xml = insert_fixture
element.insert_element_before(child, *tagnames)
assert element.xml == expected_xml
def it_can_remove_all_children_with_name_in_sequence(self, remove_fixture):
element, tagnames, expected_xml = remove_fixture
element.remove_all(*tagnames)
assert element.xml == expected_xml
# fixtures ---------------------------------------------
@pytest.fixture(
params=[
("biu", "iu", "i"),
("bu", "iu", "u"),
("bi", "u", None),
("b", "iu", None),
("iu", "biu", "i"),
("", "biu", None),
]
)
def first_fixture(self, request):
present, matching, match = request.param
element = self.rPr_bldr(present).element
tagnames = self.nsptags(matching)
matching_child = element.find(qn("w:%s" % match)) if match else None
return element, tagnames, matching_child
@pytest.fixture(
params=[
("iu", "b", "iu", "biu"),
("u", "b", "iu", "bu"),
("", "b", "iu", "b"),
("bu", "i", "u", "biu"),
("bi", "u", "", "biu"),
]
)
def insert_fixture(self, request):
present, new, successors, after = request.param
element = self.rPr_bldr(present).element
child = {"b": a_b(), "i": an_i(), "u": a_u()}[new].with_nsdecls().element
tagnames = [("w:%s" % char) for char in successors]
expected_xml = self.rPr_bldr(after).xml()
return element, child, tagnames, expected_xml
@pytest.fixture(
params=[
("biu", "b", "iu"),
("biu", "bi", "u"),
("bbiiuu", "i", "bbuu"),
("biu", "i", "bu"),
("biu", "bu", "i"),
("bbiiuu", "", "bbiiuu"),
("biu", "u", "bi"),
("biu", "ui", "b"),
("bbiiuu", "bi", "uu"),
("bu", "i", "bu"),
("", "ui", ""),
]
)
def remove_fixture(self, request):
present, remove, after = request.param
element = self.rPr_bldr(present).element
tagnames = self.nsptags(remove)
expected_xml = self.rPr_bldr(after).xml()
return element, tagnames, expected_xml
# fixture components ---------------------------------------------
def nsptags(self, letters):
return [("w:%s" % letter) for letter in letters]
def rPr_bldr(self, children):
rPr_bldr = an_rPr().with_nsdecls()
for char in children:
if char == "b":
rPr_bldr.with_child(a_b())
elif char == "i":
rPr_bldr.with_child(an_i())
elif char == "u":
rPr_bldr.with_child(a_u())
else:
raise NotImplementedError("got '%s'" % char)
return rPr_bldr
| DescribeBaseOxmlElement |
python | falconry__falcon | tests/test_typing.py | {
"start": 296,
"end": 365
} | class ____(falcon.Request):
context_type = RichContext
| FancyRequest |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 302765,
"end": 304784
} | class ____(rv_continuous):
r"""A skewed Cauchy random variable.
%(before_notes)s
See Also
--------
cauchy : Cauchy distribution
Notes
-----
The probability density function for `skewcauchy` is:
.. math::
f(x) = \frac{1}{\pi \left(\frac{x^2}{\left(a\, \text{sign}(x) + 1
\right)^2} + 1 \right)}
for a real number :math:`x` and skewness parameter :math:`-1 < a < 1`.
When :math:`a=0`, the distribution reduces to the usual Cauchy
distribution.
%(after_notes)s
References
----------
.. [1] "Skewed generalized *t* distribution", Wikipedia
https://en.wikipedia.org/wiki/Skewed_generalized_t_distribution#Skewed_Cauchy_distribution
%(example)s
"""
def _argcheck(self, a):
return np.abs(a) < 1
def _shape_info(self):
return [_ShapeInfo("a", False, (-1.0, 1.0), (False, False))]
def _pdf(self, x, a):
return 1 / (np.pi * (x**2 / (a * np.sign(x) + 1)**2 + 1))
def _cdf(self, x, a):
return np.where(x <= 0,
(1 - a) / 2 + (1 - a) / np.pi * np.arctan(x / (1 - a)),
(1 - a) / 2 + (1 + a) / np.pi * np.arctan(x / (1 + a)))
def _ppf(self, x, a):
i = x < self._cdf(0, a)
return np.where(i,
np.tan(np.pi / (1 - a) * (x - (1 - a) / 2)) * (1 - a),
np.tan(np.pi / (1 + a) * (x - (1 - a) / 2)) * (1 + a))
def _stats(self, a, moments='mvsk'):
return np.nan, np.nan, np.nan, np.nan
def _fitstart(self, data):
# Use 0 as the initial guess of the skewness shape parameter.
# For the location and scale, estimate using the median and
# quartiles.
if isinstance(data, CensoredData):
data = data._uncensor()
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return 0.0, p50, (p75 - p25)/2
skewcauchy = skewcauchy_gen(name='skewcauchy')
| skewcauchy_gen |
python | ansible__ansible | lib/ansible/_internal/_yaml/_dumper.py | {
"start": 999,
"end": 2601
} | class ____(_BaseDumper):
"""A simple stub class that allows us to add representers for our custom types."""
@classmethod
def _register_representers(cls) -> None:
cls.add_multi_representer(AnsibleTaggedObject, cls.represent_ansible_tagged_object)
cls.add_multi_representer(Tripwire, cls.represent_tripwire)
cls.add_multi_representer(c.Mapping, cls.represent_dict)
cls.add_multi_representer(c.Collection, cls.represent_list)
cls.add_multi_representer(_jinja_common.VaultExceptionMarker, cls.represent_vault_exception_marker)
def get_node_from_ciphertext(self, data: object) -> ScalarNode | None:
if ciphertext := VaultHelper.get_ciphertext(data, with_tags=False):
return self.represent_scalar('!vault', ciphertext, style='|')
return None
def represent_vault_exception_marker(self, data: _jinja_common.VaultExceptionMarker) -> ScalarNode:
if node := self.get_node_from_ciphertext(data):
return node
data.trip()
def represent_ansible_tagged_object(self, data: AnsibleTaggedObject) -> Node:
if _internal.is_intermediate_mapping(data):
return self.represent_dict(data)
if _internal.is_intermediate_iterable(data):
return self.represent_list(data)
if node := self.get_node_from_ciphertext(data):
return node
return self.represent_data(AnsibleTagHelper.as_native_type(data)) # automatically decrypts encrypted strings
def represent_tripwire(self, data: Tripwire) -> t.NoReturn:
data.trip()
| AnsibleDumper |
python | coleifer__peewee | tests/libs/mock.py | {
"start": 31066,
"end": 34319
} | class ____(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
Alternatively `side_effect` can be an exception class or instance. In
this case the exception will be raised when the mock is called.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
| Mock |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 492370,
"end": 495282
} | class ____(ExprNode):
# C++ typeid operator applied to a type or variable
#
# operand ExprNode
# arg_type ExprNode
# is_variable boolean
subexprs = ['operand']
arg_type = None
is_variable = None
is_temp = 1
def get_type_info_type(self, env):
env_module = env
while not env_module.is_module_scope:
env_module = env_module.outer_scope
typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
typeinfo_entry = typeinfo_module.lookup('type_info')
return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_or_volatile_type(typeinfo_entry.type, is_const=True))
cpp_message = 'typeid operator'
def analyse_types(self, env):
if not self.type:
self.type = PyrexTypes.error_type # default value if it isn't analysed successfully
self.cpp_check(env)
type_info = self.get_type_info_type(env)
if not type_info:
self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
return self
if self.operand is None:
return self # already analysed, no need to repeat
self.type = type_info
as_type = self.operand.analyse_as_specialized_type(env)
if as_type:
self.arg_type = as_type
self.is_type = True
self.operand = None # nothing further uses self.operand - will only cause problems if its used in code generation
else:
self.arg_type = self.operand.analyse_types(env)
self.is_type = False
self.operand = None # nothing further uses self.operand - will only cause problems if its used in code generation
if self.arg_type.type.is_pyobject:
self.error("Cannot use typeid on a Python object")
return self
elif self.arg_type.type.is_void:
self.error("Cannot use typeid on void")
return self
elif not self.arg_type.type.is_complete():
self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
return self
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
return self
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def check_const(self):
return True
def calculate_result_code(self):
return self.temp_code
def generate_result_code(self, code):
if self.is_type:
arg_code = self.arg_type.empty_declaration_code()
else:
arg_code = self.arg_type.result()
translate_cpp_exception(code, self.pos,
"%s = typeid(%s);" % (self.temp_code, arg_code),
None, None, self.in_nogil_context)
| TypeidNode |
python | Lightning-AI__lightning | src/lightning/fabric/fabric.py | {
"start": 3085,
"end": 56963
} | class ____:
r"""Fabric accelerates your PyTorch training or inference code with minimal changes required.
Key Features:
- Automatic placement of models and data onto the device.
- Automatic support for mixed and double precision (smaller memory footprint).
- Seamless switching between hardware (CPU, GPU, TPU) and distributed training strategies
(data-parallel training, sharded training, etc.).
- Automated spawning of processes, no launch utilities required.
- Multi-node support.
Args:
accelerator: The hardware to run on. Possible choices are:
``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
Defaults to ``"auto"``.
strategy: Strategy for how to run across multiple devices. Possible choices are:
``"dp"``, ``"ddp"``, ``"ddp_spawn"``, ``"deepspeed"``, ``"fsdp"``, ``"auto"``.
Defaults to ``"auto"``.
devices: Number of devices to train on (``int``), which GPUs to train on (``list`` or ``str``), or ``"auto"``.
The value applies per node. Defaults to ``"auto"``.
num_nodes: Number of GPU nodes for distributed training. Defaults to ``1``.
precision: Double precision (``"64"``), full precision (``"32"``), half precision AMP (``"16-mixed"``),
or bfloat16 precision AMP (``"bf16-mixed"``). If ``None``, defaults will be used based on the device.
plugins: One or several custom plugins as a single plugin or list of plugins.
callbacks: A single callback or a list of callbacks. A callback can contain any arbitrary methods that
can be invoked through :meth:`~lightning.fabric.fabric.Fabric.call` by the user.
loggers: A single logger or a list of loggers. See :meth:`~lightning.fabric.fabric.Fabric.log` for more
information.
Example::
# Basic usage
fabric = Fabric(accelerator="gpu", devices=2)
# Set up model and optimizer
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
model, optimizer = fabric.setup(model, optimizer)
# Training loop
for batch in dataloader:
optimizer.zero_grad()
loss = model(batch)
fabric.backward(loss)
optimizer.step()
"""
def __init__(
self,
*,
accelerator: Union[str, Accelerator] = "auto",
strategy: Union[str, Strategy] = "auto",
devices: Union[list[int], str, int] = "auto",
num_nodes: int = 1,
precision: Optional[_PRECISION_INPUT] = None,
plugins: Optional[Union[_PLUGIN_INPUT, list[_PLUGIN_INPUT]]] = None,
callbacks: Optional[Union[list[Any], Any]] = None,
loggers: Optional[Union[Logger, list[Logger]]] = None,
) -> None:
self._connector = _Connector(
accelerator=accelerator,
strategy=strategy,
devices=devices,
num_nodes=num_nodes,
precision=precision,
plugins=plugins,
)
self._strategy: Strategy = self._connector.strategy
self._accelerator: Accelerator = self._connector.accelerator
self._precision: Precision = self._strategy.precision
self._callbacks = self._configure_callbacks(callbacks)
loggers = loggers if loggers is not None else []
self._loggers = loggers if isinstance(loggers, list) else [loggers]
self._models_setup: int = 0
self._launched: bool = False
self._prepare_run_method()
if _is_using_cli():
# when the CLI is used to launch the script, we need to set up the environment (init processes) here so
# that the user can immediately use all functionality in strategies
self._strategy.setup_environment()
self._launched = True
@property
def accelerator(self) -> Accelerator:
return self._accelerator
@property
def strategy(self) -> Strategy:
return self._strategy
@property
def device(self) -> torch.device:
"""The current device this process runs on.
Use this to create tensors directly on the device if needed.
"""
return self._strategy.root_device
@property
def global_rank(self) -> int:
"""The global index of the current process across all devices and nodes."""
return getattr(self._strategy, "global_rank", 0)
@property
def local_rank(self) -> int:
"""The index of the current process among the processes running on the local node."""
return getattr(self._strategy, "local_rank", 0)
@property
def node_rank(self) -> int:
"""The index of the current node."""
return getattr(self._strategy, "node_rank", 0)
@property
def world_size(self) -> int:
"""The total number of processes running across all devices and nodes."""
return getattr(self._strategy, "world_size", 1)
@property
def is_global_zero(self) -> bool:
"""Whether this rank is rank zero."""
return self._strategy.is_global_zero
@property
def loggers(self) -> list[Logger]:
"""Returns all loggers passed to Fabric."""
return self._loggers
@property
def logger(self) -> Logger:
"""Returns the first logger in the list passed to Fabric, which is considered the main logger."""
return self._loggers[0]
def run(self, *args: Any, **kwargs: Any) -> Any:
"""All the code inside this run method gets accelerated by Fabric.
You can pass arbitrary arguments to this function when overriding it.
"""
def setup(
self,
module: nn.Module,
*optimizers: Optimizer,
scheduler: Optional["_LRScheduler"] = None,
move_to_device: bool = True,
_reapply_compile: bool = True,
) -> Any: # no specific return because the way we want our API to look does not play well with mypy
r"""Set up a model and its optimizers for accelerated training.
Args:
module: A :class:`torch.nn.Module` to set up.
*optimizers: The optimizer(s) to set up. Can be zero or more optimizers.
scheduler: An optional learning rate scheduler to set up. Must be provided after optimizers if used.
move_to_device: If set ``True`` (default), moves the model to the correct device. Set this to ``False``
and alternatively use :meth:`to_device` manually.
_reapply_compile: If ``True`` (default), and the model was ``torch.compile``d before, the
corresponding :class:`~torch._dynamo.OptimizedModule` wrapper will be removed and reapplied with the
same settings after the model was set up by the strategy (e.g., after the model was wrapped by DDP,
FSDP etc.). Set it to ``False`` if compiling DDP/FSDP is causing issues.
Returns:
If no optimizers are passed, returns the wrapped module. If optimizers are passed, returns a tuple
containing the wrapped module and optimizers, and optionally the scheduler if provided, in the same
order they were passed in.
Note:
For certain strategies like FSDP, you may need to set up the model first using :meth:`setup_module`,
then create the optimizer, and finally set up the optimizer using :meth:`setup_optimizers`.
Example::
# Basic usage
model, optimizer = fabric.setup(model, optimizer)
# With multiple optimizers and scheduler
model, opt1, opt2, scheduler = fabric.setup(model, opt1, opt2, scheduler=scheduler)
# Model only
model = fabric.setup(model)
"""
self._validate_setup(module, optimizers)
module, compile_kwargs = _unwrap_compiled(module) if _reapply_compile else (module, None)
original_module = module
module = self._precision.convert_module(module)
if move_to_device:
module = self._move_model_to_device(model=module, optimizers=list(optimizers))
# Let accelerator/plugin wrap and connect the models and optimizers
if optimizers:
module, optimizers, scheduler = self._strategy.setup_module_and_optimizers( # type: ignore[assignment]
module, list(optimizers), scheduler
)
else:
module = self._strategy.setup_module(module)
if compile_kwargs is not None:
module = _to_compiled(module, compile_kwargs)
module = _FabricModule(module, self._strategy, original_module=original_module)
# Update the _DeviceDtypeModuleMixin's device parameter
# NOTE: for sharded strategies or manual device placement, there's no single root device
_update_properties(
module, device=self.device if move_to_device else next(module.parameters(), torch.tensor(0)).device
)
optimizers = [_FabricOptimizer(optimizer, self._strategy, self._callbacks) for optimizer in optimizers]
self._models_setup += 1
if hasattr(original_module, "_fabric"): # this is probably a LightningModule
original_module._fabric = self
original_module._fabric_optimizers = optimizers
if original_module not in self._callbacks:
self._callbacks.append(original_module)
self.call("on_after_setup", fabric=self, module=module)
if optimizers:
# join both types in a tuple for API convenience
return (module, *optimizers, scheduler) if scheduler is not None else (module, *optimizers)
return module
def setup_module(
self, module: nn.Module, move_to_device: bool = True, _reapply_compile: bool = True
) -> _FabricModule:
r"""Set up a model for accelerated training or inference.
This is the same as calling ``.setup(model)`` with no optimizers. It is useful for inference or for certain
strategies like `FSDP` that require setting up the module before the optimizer can be created and set up.
See also :meth:`setup_optimizers`.
Args:
module: A :class:`torch.nn.Module` to set up.
move_to_device: If set ``True`` (default), moves the model to the correct device. Set this to ``False``
and alternatively use :meth:`to_device` manually.
_reapply_compile: If ``True`` (default), and the model was ``torch.compile``d before, the
corresponding :class:`~torch._dynamo.OptimizedModule` wrapper will be removed and reapplied with the
same settings after the model was set up by the strategy (e.g., after the model was wrapped by DDP,
FSDP etc.). Set it to ``False`` if compiling DDP/FSDP is causing issues.
Returns:
The wrapped model as a :class:`~lightning.fabric.wrappers._FabricModule`.
Example::
# Set up model first (useful for FSDP)
model = fabric.setup_module(model)
# Then create and set up optimizer
optimizer = torch.optim.Adam(model.parameters())
optimizer = fabric.setup_optimizers(optimizer)
"""
self._validate_setup_module(module)
module, compile_kwargs = _unwrap_compiled(module) if _reapply_compile else (module, None)
original_module = module
module = self._precision.convert_module(module)
if move_to_device:
module = self._move_model_to_device(model=module, optimizers=[])
# Let strategy wrap and connect the module alone
module = self._strategy.setup_module(module)
if compile_kwargs is not None:
module = _to_compiled(module, compile_kwargs)
module = _FabricModule(module, self._strategy, original_module=original_module)
# Update the _DeviceDtypeModuleMixin's device parameter
# NOTE: for sharded strategies or manual device placement, there's no single root device
_update_properties(
module, device=self.device if move_to_device else next(module.parameters(), torch.tensor(0)).device
)
if hasattr(original_module, "_fabric"): # this is probably a LightningModule
original_module._fabric = self
if original_module not in self._callbacks:
self._callbacks.append(original_module)
self._models_setup += 1
return module
def setup_optimizers(self, *optimizers: Optimizer) -> Union[_FabricOptimizer, tuple[_FabricOptimizer, ...]]:
r"""Set up one or more optimizers for accelerated training.
Some strategies do not allow setting up model and optimizer independently. For them, you should call
``.setup(model, optimizer, ...)`` instead to jointly set them up.
Args:
*optimizers: One or more optimizers to set up. Must provide at least one optimizer.
Returns:
If a single optimizer is passed, returns the wrapped optimizer. If multiple optimizers are passed,
returns a tuple of wrapped optimizers in the same order they were passed in.
Raises:
RuntimeError: If using DeepSpeed or XLA strategies, which require joint model-optimizer setup.
Note:
This method cannot be used with DeepSpeed or XLA strategies. Use :meth:`setup` instead for those strategies.
Example::
# Single optimizer
optimizer = fabric.setup_optimizers(optimizer)
# Multiple optimizers
opt1, opt2 = fabric.setup_optimizers(opt1, opt2)
"""
self._validate_setup_optimizers(optimizers)
optimizers = [self._strategy.setup_optimizer(optimizer) for optimizer in optimizers]
optimizers = [
_FabricOptimizer(optimizer=optimizer, strategy=self._strategy, callbacks=self._callbacks)
for optimizer in optimizers
]
return optimizers[0] if len(optimizers) == 1 else tuple(optimizers)
def setup_dataloaders(
self, *dataloaders: DataLoader, use_distributed_sampler: bool = True, move_to_device: bool = True
) -> Union[DataLoader, list[DataLoader]]:
r"""Set up one or multiple dataloaders for accelerated training. If you need different settings for each
dataloader, call this method individually for each one.
Args:
*dataloaders: One or more PyTorch :class:`~torch.utils.data.DataLoader` instances to set up.
use_distributed_sampler: If set ``True`` (default), automatically wraps or replaces the sampler on the
dataloader(s) for distributed training. If you have a custom sampler defined, set this argument
to ``False``.
move_to_device: If set ``True`` (default), moves the data returned by the dataloader(s) automatically to
the correct device. Set this to ``False`` and alternatively use :meth:`to_device` manually on the
returned data.
Returns:
If a single dataloader is passed, returns the wrapped dataloader. If multiple dataloaders are passed,
returns a list of wrapped dataloaders in the same order they were passed in.
Example::
# Single dataloader
train_loader = fabric.setup_dataloaders(train_loader)
# Multiple dataloaders
train_loader, val_loader = fabric.setup_dataloaders(train_loader, val_loader)
"""
self._validate_setup_dataloaders(dataloaders)
dataloaders = [
self._setup_dataloader(
dataloader, use_distributed_sampler=use_distributed_sampler, move_to_device=move_to_device
)
for dataloader in dataloaders
]
return dataloaders[0] if len(dataloaders) == 1 else dataloaders
def _setup_dataloader(
self, dataloader: DataLoader, use_distributed_sampler: bool = True, move_to_device: bool = True
) -> DataLoader:
r"""Set up a single dataloader for accelerated training.
Args:
dataloader: The dataloader to accelerate.
use_distributed_sampler: If set ``True`` (default), automatically wraps or replaces the sampler on the
dataloader for distributed training. If you have a custom sampler defined, set this argument to
``False``.
move_to_device: If set ``True`` (default), moves the data returned by the dataloader automatically to
the correct device. Set this to ``False`` and alternatively use :meth:`to_device` manually on the
returned data.
Returns:
The wrapped dataloader.
"""
if use_distributed_sampler and self._requires_distributed_sampler(dataloader):
sampler = self._get_distributed_sampler(dataloader, **self._strategy.distributed_sampler_kwargs)
# the dataloader needs to be re-instantiated because we want to update the sampler
dataloader = _update_dataloader(dataloader, sampler)
# add worker_init_fn for correct seeding in worker processes
_auto_add_worker_init_fn(dataloader, self.global_rank)
dataloader = self._strategy.process_dataloader(dataloader)
device = self.device if move_to_device and not isinstance(self._strategy, XLAStrategy) else None
fabric_dataloader = _FabricDataLoader(dataloader=dataloader, device=device)
fabric_dataloader = cast(DataLoader, fabric_dataloader)
return fabric_dataloader
def backward(self, tensor: Tensor, *args: Any, model: Optional[_FabricModule] = None, **kwargs: Any) -> None:
r"""Replaces ``loss.backward()`` in your training loop. Handles precision automatically for you.
Args:
tensor: The tensor (loss) to back-propagate gradients from.
*args: Optional positional arguments passed to the underlying backward function.
model: Optional model instance for plugins that require the model for backward(). Required when using
DeepSpeed strategy with multiple models.
**kwargs: Optional named keyword arguments passed to the underlying backward function.
Note:
When using ``strategy="deepspeed"`` and multiple models were set up, it is required to pass in the
model as argument here.
Example::
loss = criterion(output, target)
fabric.backward(loss)
# With DeepSpeed and multiple models
fabric.backward(loss, model=model)
"""
module = model._forward_module if model is not None else model
module, _ = _unwrap_compiled(module)
if isinstance(self._strategy, DeepSpeedStrategy):
if model is None:
if self._models_setup == 0:
raise RuntimeError("No models were set up for backward. Did you forget to call `fabric.setup()`?")
if self._models_setup > 1:
raise ValueError(
"When using multiple models + deepspeed, please provide the model used to perform"
" the optimization: `self.backward(loss, model=model)`"
)
module = self._strategy.model
else:
# requires to attach the current `DeepSpeedEngine` for the `_FabricOptimizer.step` call.
self._strategy._deepspeed_engine = module
lightning.fabric.wrappers._in_fabric_backward = True
try:
self._strategy.backward(tensor, module, *args, **kwargs)
finally:
lightning.fabric.wrappers._in_fabric_backward = False
def clip_gradients(
    self,
    module: Union[torch.nn.Module, _FabricModule],
    optimizer: Union[Optimizer, _FabricOptimizer],
    clip_val: Optional[Union[float, int]] = None,
    max_norm: Optional[Union[float, int]] = None,
    norm_type: Union[float, int] = 2.0,
    error_if_nonfinite: bool = True,
) -> Optional[torch.Tensor]:
    """Clip gradients either by value (``clip_val``) or by p-norm (``max_norm``); exactly one must be given.

    Args:
        module: The module whose parameters should be clipped.
        optimizer: The optimizer referencing the parameters to be clipped.
        clip_val: Clip every gradient element to this value. Mutually exclusive with ``max_norm``.
        max_norm: Rescale gradients so that their total p-norm is no larger than this value.
            Mutually exclusive with ``clip_val``.
        norm_type: Which p-norm to use together with ``max_norm`` (``'inf'`` for infinity norm).
            Defaults to the 2-norm.
        error_if_nonfinite: Raise if the total gradient norm is NaN or infinite (``max_norm`` only).

    Returns:
        The total norm of the gradients (before clipping) as a scalar tensor when ``max_norm`` was
        passed, otherwise ``None``.

    Raises:
        ValueError: If both ``clip_val`` and ``max_norm`` are provided, or if neither is provided.
    """
    by_value = clip_val is not None
    by_norm = max_norm is not None
    if by_value and by_norm:
        raise ValueError(
            "Only one of `clip_val` or `max_norm` can be set as this specifies the underlying clipping algorithm!"
        )
    if not (by_value or by_norm):
        raise ValueError("You have to specify either `clip_val` or `max_norm` to do gradient clipping!")
    # Strategies operate on the raw (unwrapped) module/optimizer objects.
    raw_module = _unwrap_objects(module)
    raw_optimizer = _unwrap_objects(optimizer)
    if by_value:
        self.strategy.clip_gradients_value(raw_module, raw_optimizer, clip_val=clip_val)
        return None
    return self.strategy.clip_gradients_norm(
        raw_module,
        raw_optimizer,
        max_norm=max_norm,
        norm_type=norm_type,
        error_if_nonfinite=error_if_nonfinite,
    )
def autocast(self) -> AbstractContextManager:
    """Return a context manager that applies the configured precision to operations run inside it.

    Only needed when the model's ``forward`` does not already cover all operations you want to run
    under the chosen precision setting.
    """
    precision_plugin = self._precision
    return precision_plugin.forward_context()
@overload
def to_device(self, obj: nn.Module) -> nn.Module: ...

@overload
def to_device(self, obj: Tensor) -> Tensor: ...

@overload
def to_device(self, obj: Any) -> Any: ...

def to_device(self, obj: Union[nn.Module, Tensor, Any]) -> Union[nn.Module, Tensor, Any]:
    r"""Move a :class:`torch.nn.Module` or a collection of tensors to the current device, if it is not already on
    that device.

    Args:
        obj: An object to move to the device. Can be an instance of :class:`torch.nn.Module`, a tensor, or a
            (nested) collection of tensors (e.g., a dictionary).

    Returns:
        A reference to the object that was moved to the new device.
    """
    if isinstance(obj, nn.Module):
        # Modules are moved in-place via the strategy (which may need device-specific handling),
        # after ensuring the accelerator has initialized the target device.
        self._accelerator.setup_device(self.device)
        self._strategy.module_to_device(obj)
        return obj
    # Tensors and (nested) collections of tensors are moved out-of-place.
    return move_data_to_device(obj, device=self.device)
def print(self, *args: Any, **kwargs: Any) -> None:
    r"""Print only from the first process on each machine (local rank 0).

    All arguments are forwarded to the Python built-in :func:`print` function.
    """
    if self.local_rank != 0:
        # Suppress duplicated output from the other processes on this node.
        return
    print(*args, **kwargs)
def barrier(self, name: Optional[str] = None) -> None:
    """Block until every process has entered this call.

    Use sparingly — synchronization adds overhead. Must be called on all processes;
    failing to do so stalls the program forever.
    """
    self._validate_launched()
    self._strategy.barrier(name=name)
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
    r"""Send ``obj`` from process ``src`` to all others; every rank receives the same value.

    Must be called on all processes, otherwise the program stalls forever. Any serializable
    object is supported, but a :class:`~torch.Tensor` is transferred most efficiently.

    Args:
        obj: The object to broadcast to all other members.
        src: The (global) rank of the process that sends the data.

    Return:
        The transferred data, the same value on every rank.
    """
    self._validate_launched()
    return self._strategy.broadcast(obj, src=src)
def all_gather(
    self, data: Union[Tensor, dict, list, tuple], group: Optional[Any] = None, sync_grads: bool = False
) -> Union[Tensor, dict, list, tuple]:
    """Gather tensors or collections of tensors from multiple processes.

    This method needs to be called on all processes and the tensors need to have the same shape across all
    processes, otherwise your program will stall forever.

    Args:
        data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof.
        group: the process group to gather results from. Defaults to all processes (world).
        sync_grads: flag that allows users to synchronize gradients for the ``all_gather`` operation

    Return:
        A tensor of shape (world_size, batch, ...), or if the input was a collection
        the output will also be a collection with tensors of this shape. For the special case where
        world_size is 1, no additional dimension is added to the tensor(s).
    """
    self._validate_launched()
    group = group if group is not None else torch.distributed.group.WORLD
    # Numbers and scalars are converted to tensors on this rank's device so the collective
    # can treat every leaf uniformly.
    data = convert_to_tensors(data, device=self.device)
    # Apply the strategy's all_gather to every tensor leaf of the (possibly nested) collection.
    return apply_to_collection(data, Tensor, self._strategy.all_gather, group=group, sync_grads=sync_grads)
def all_reduce(
    self,
    data: Union[Tensor, dict, list, tuple],
    group: Optional[Any] = None,
    reduce_op: Optional[Union[ReduceOp, str]] = "mean",
) -> Union[Tensor, dict, list, tuple]:
    """Reduce tensors or collections of tensors from multiple processes.

    The reduction on tensors is applied in-place, meaning the result will be placed back into the input tensor.
    This method needs to be called on all processes and the tensors need to have the same shape across all
    processes, otherwise your program will stall forever.

    Args:
        data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof. Tensor will be
            modified in-place.
        group: the process group to reduce results across. Defaults to all processes (world).
        reduce_op: the reduction operation. Defaults to 'mean'. Can also be a string 'sum' or ReduceOp.
            Some strategies may limit the choices here.

    Return:
        A tensor of the same shape as the input with values reduced pointwise across processes. The same is
        applied to tensors in a collection if a collection is given as input.
    """
    self._validate_launched()
    group = group if group is not None else torch.distributed.group.WORLD
    # Convert plain numbers to tensors on this rank's device before the collective runs.
    data = convert_to_tensors(data, device=self.device)
    # Reduce every tensor leaf of the (possibly nested) collection in-place.
    return apply_to_collection(data, Tensor, self._strategy.all_reduce, group=group, reduce_op=reduce_op)
@contextmanager
def rank_zero_first(self, local: bool = False) -> Generator:
    r"""The code block under this context manager gets executed first on the main process (rank 0) and only when
    completed, the other processes get to run the code in parallel.

    Args:
        local: Set this to ``True`` if the **local** rank should be the one going first. Useful if you are
            downloading data and the filesystem isn't shared between the nodes.

    Example::

        with fabric.rank_zero_first():
            dataset = MNIST("datasets/", download=True)
    """
    rank = self.local_rank if local else self.global_rank
    # An "infinite" barrier (no collective timeout) is used because rank 0's work inside the
    # block (e.g. a download) may take arbitrarily long while the other ranks wait.
    with _InfiniteBarrier() as barrier:
        if rank > 0:
            # Non-zero ranks wait here until rank 0 has finished its block and hit the barrier below.
            barrier()
        yield
        if rank == 0:
            # Rank 0 releases the waiting ranks once its block has completed.
            barrier()
def no_backward_sync(self, module: _FabricModule, enabled: bool = True) -> AbstractContextManager:
    r"""Skip gradient synchronization during backward to avoid redundant communication overhead.

    Use this context manager when performing gradient accumulation to speed up training with multiple devices.
    Both the model's ``.forward()`` and the ``fabric.backward()`` call need to run under this context.

    Args:
        module: The module for which to control the gradient synchronization. Must be a module that was
            set up with :meth:`setup` or :meth:`setup_module`.
        enabled: Whether the context manager is enabled or not. ``True`` means skip the sync, ``False`` means do not
            skip.

    Returns:
        A context manager that controls gradient synchronization.

    Raises:
        TypeError: If the module was not set up with Fabric first.

    Note:
        For strategies that don't support gradient sync control, a warning is emitted and the context manager
        becomes a no-op. For single-device strategies, it is always a no-op.

    Example::

        # Accumulate gradients over 8 batches
        for batch_idx, batch in enumerate(dataloader):
            with fabric.no_backward_sync(model, enabled=(batch_idx % 8 != 0)):
                output = model(batch)
                loss = criterion(output, target)
                fabric.backward(loss)
            if batch_idx % 8 == 0:
                optimizer.step()
                optimizer.zero_grad()
    """
    # A compiled model wraps the _FabricModule; unwrap before checking the type.
    module, _ = _unwrap_compiled(module)
    if not isinstance(module, _FabricModule):
        raise TypeError(
            "You need to set up the model first before you can call `fabric.no_backward_sync()`:"
            " `model = fabric.setup(model, ...)`"
        )
    if isinstance(self._strategy, (SingleDeviceStrategy, XLAStrategy)):
        # Nothing to synchronize on a single device; XLA handles this differently.
        return nullcontext()
    if self._strategy._backward_sync_control is None:
        # Strategy cannot toggle gradient sync: warn and degrade to a no-op.
        rank_zero_warn(
            f"The `{self._strategy.__class__.__name__}` does not support skipping the gradient synchronization."
            f" Remove `.no_backward_sync()` from your code or choose a different strategy.",
            category=PossibleUserWarning,
        )
        return nullcontext()
    # The sync control operates on the strategy-wrapped forward module (e.g. the DDP wrapper).
    forward_module, _ = _unwrap_compiled(module._forward_module)
    return self._strategy._backward_sync_control.no_backward_sync(forward_module, enabled)
def sharded_model(self) -> AbstractContextManager:
    r"""Instantiate a model under this context manager to prepare it for model-parallel sharding.

    .. deprecated:: This context manager is deprecated in favor of :meth:`init_module`, use it instead.
    """
    rank_zero_deprecation("`Fabric.sharded_model()` is deprecated in favor of `Fabric.init_module()`.")
    self._validate_launched()
    strategy = self.strategy
    if not isinstance(strategy, _Sharded):
        # Strategies without sharding support get a plain no-op context.
        return nullcontext()
    return strategy.module_sharded_context()
def init_tensor(self) -> AbstractContextManager:
    """Return a context manager under which new tensors are created on the target device right away,
    with the data type implied by the configured precision."""
    strategy = self._strategy
    return strategy.tensor_init_context()
def init_module(self, empty_init: Optional[bool] = None) -> AbstractContextManager:
    """Return a context manager for instantiating a model with reduced peak memory.

    Parameters are created on the target device with the right dtype immediately, avoiding
    a wasteful CPU allocation followed by a copy.

    Args:
        empty_init: Whether to initialize the model with empty (uninitialized) weights.
            ``None`` lets the strategy decide; not every strategy supports every option.
            Set to ``True`` when a checkpoint will be loaded into a large model.
    """
    self._validate_launched()
    strategy = self._strategy
    return strategy.module_init_context(empty_init=empty_init)
def save(
    self,
    path: Union[str, Path],
    state: dict[str, Union[nn.Module, Optimizer, Any]],
    filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
) -> None:
    r"""Save checkpoint contents to a file.

    Which processes write, and how, is determined by the strategy: e.g. ``ddp`` writes only from
    rank 0, while ``fsdp`` writes from every rank. Must be called on all processes!

    Args:
        path: Where the file(s) should be saved.
        state: Contents to save. Modules and optimizers in the dict are converted to their
            state-dicts automatically.
        filter: Optional per-state-key callables returning whether an item of that state-dict
            should be saved (``True``) or dropped (``False``).

    Raises:
        TypeError: If ``filter`` is not a dictionary or contains non-callable values.
        ValueError: If ``filter`` keys don't match ``state`` keys.

    Example::

        state = {"model": model, "optimizer": optimizer, "epoch": epoch}
        fabric.save("checkpoint.pth", state)
    """
    if filter is not None:
        # Validate the filter spec eagerly so mistakes surface before any strategy I/O happens.
        if not isinstance(filter, dict):
            raise TypeError(f"Filter should be a dictionary, given {filter!r}")
        if not set(filter).issubset(state):
            raise ValueError(
                f"The filter keys {filter.keys() - state} are not present in the state keys {set(state)}."
            )
        for key, fn in filter.items():
            if not callable(fn):
                raise TypeError(f"Expected `fabric.save(filter=...)` for key {key!r} to be a callable, given {fn!r}")
    self._strategy.save_checkpoint(path=path, state=_unwrap_objects(state), filter=filter)
    # Keep ranks in lockstep so no process races ahead while others are still writing.
    self.barrier()
def load(
    self,
    path: Union[str, Path],
    state: Optional[dict[str, Union[nn.Module, Optimizer, Any]]] = None,
    strict: bool = True,
) -> dict[str, Any]:
    """Load a checkpoint from a file and restore the state of objects (modules, optimizers, etc.)

    How and which processes load gets determined by the `strategy`.
    This method must be called on all processes!

    Args:
        path: A path to where the file is located.
        state: A dictionary of objects whose state will be restored in-place from the checkpoint path.
            If no state is given, then the checkpoint will be returned in full.
        strict: Whether to enforce that the keys in `state` match the keys in the checkpoint.

    Returns:
        The remaining items that were not restored into the given state dictionary. If no state dictionary is
        given, the full checkpoint will be returned.

    Example::

        # Load full checkpoint
        checkpoint = fabric.load("checkpoint.pth")

        # Load into existing objects
        state = {"model": model, "optimizer": optimizer}
        remainder = fabric.load("checkpoint.pth", state)
        epoch = remainder.get("epoch", 0)
    """
    # Strategies restore into the raw (unwrapped) objects.
    unwrapped_state = _unwrap_objects(state)
    remainder = self._strategy.load_checkpoint(path=path, state=unwrapped_state, strict=strict)
    # Keep all ranks in lockstep after (possibly rank-local) checkpoint I/O.
    self.barrier()
    if state is not None:
        # We need to unwrap objects (see above) but this creates a new dictionary. In-place updates
        # (for user metadata) wouldn't show up in the original dict, so we need to copy the data back.
        for k in list(unwrapped_state.keys()):
            obj, _ = _unwrap_compiled(state[k])
            if isinstance(obj, (_FabricModule, _FabricOptimizer, _FabricDataLoader)):
                # Wrapped objects were restored in-place; nothing to copy back.
                continue
            state[k] = unwrapped_state[k]
    return remainder
def load_raw(self, path: Union[str, Path], obj: Union[nn.Module, Optimizer], strict: bool = True) -> None:
    """Restore a single module or optimizer from a raw state-dict file (one saved without Fabric).

    Conceptually equivalent to ``obj.load_state_dict(torch.load(path))`` but independent of the
    strategy in use.

    Args:
        path: Location of the checkpoint file.
        obj: A :class:`~torch.nn.Module` or :class:`~torch.optim.Optimizer` instance.
        strict: Enforce that the module's state-dict keys match the checkpoint's keys.
            Has no effect for optimizers.
    """
    target = _unwrap_objects(obj)
    self._strategy.load_checkpoint(path=path, state=target, strict=strict)
def launch(self, function: Callable[["Fabric"], Any] = _do_nothing, *args: Any, **kwargs: Any) -> Any:
    """Launch and initialize all the processes needed for distributed execution.

    Args:
        function: Optional function to launch when using a spawn/fork-based strategy, for example, when using the
            XLA strategy (``accelerator="tpu"``). The function must accept at least one argument, to which
            the Fabric object itself will be passed. If not provided, only process initialization will be performed.
        *args: Optional positional arguments to be passed to the function.
        **kwargs: Optional keyword arguments to be passed to the function.

    Returns:
        Returns the output of the function that ran in worker process with rank 0.

    Raises:
        RuntimeError: If called when script was launched through the CLI.
        TypeError: If function is provided but not callable, or if function doesn't accept required arguments.

    Note:
        The ``launch()`` method should only be used if you intend to specify accelerator, devices, and so on in
        the code (programmatically). If you are launching with the Lightning CLI, ``fabric run ...``, remove
        ``launch()`` from your code. The ``launch()`` is a no-op when called multiple times and no function is
        passed in.
    """
    if _is_using_cli():
        # The CLI already created the worker processes; launching again would fork twice.
        raise RuntimeError(
            "This script was launched through the CLI, and processes have already been created. Calling "
            " `.launch()` again is not allowed."
        )
    if function is not _do_nothing:
        if not callable(function):
            raise TypeError(
                f"`Fabric.launch(...)` needs to be a callable, but got {function}."
                " HINT: do `.launch(your_fn)` instead of `.launch(your_fn())`"
            )
        # The launcher passes the Fabric object as the first argument, so the function
        # must accept at least one parameter.
        if not inspect.signature(function).parameters:
            raise TypeError(
                f"`Fabric.launch(function={function})` needs to take at least one argument. The launcher will"
                " pass in the `Fabric` object so you can use it inside the function."
            )
    elif isinstance(self.strategy.launcher, (_MultiProcessingLauncher, _XLALauncher)):
        # Spawn-based launchers have nothing to run in the children without a function.
        raise TypeError(
            f"To spawn processes with the `{type(self.strategy).__name__}` strategy, `.launch()` needs to be called"
            " with a function that contains the code to launch in processes."
        )
    return self._wrap_and_launch(function, self, *args, **kwargs)
def _filter_kwargs_for_callback(self, method: Callable, kwargs: dict[str, Any]) -> dict[str, Any]:
"""Filter keyword arguments to only include those that match the callback method's signature.
Args:
method: The callback method to inspect
kwargs: The keyword arguments to filter
Returns:
A filtered dictionary of keyword arguments that match the method's signature
"""
try:
sig = inspect.signature(method)
except (ValueError, TypeError):
# If we can't inspect the signature, pass all kwargs to maintain backward compatibility
return kwargs
filtered_kwargs = {}
for name, param in sig.parameters.items():
# If the method accepts **kwargs, pass all original kwargs directly
if param.kind == inspect.Parameter.VAR_KEYWORD:
return kwargs
# If the parameter exists in the incoming kwargs, add it to filtered_kwargs
if name in kwargs:
filtered_kwargs[name] = kwargs[name]
return filtered_kwargs
def call(self, hook_name: str, *args: Any, **kwargs: Any) -> None:
    r"""Trigger the callback methods with the given name and arguments.

    Not all objects registered via ``Fabric(callbacks=...)`` must implement a method with the given name. The ones
    that have a matching method name will get called.

    Args:
        hook_name: The name of the callback method.
        *args: Optional positional arguments that get passed down to the callback method.
        **kwargs: Optional keyword arguments that get passed down to the callback method. Keyword arguments
            that are not present in the callback's signature will be filtered out automatically, allowing
            callbacks to have different signatures for the same hook.

    Example::

        class MyCallback:
            def on_train_epoch_end(self, results):
                ...

        fabric = Fabric(callbacks=[MyCallback()])
        fabric.call("on_train_epoch_end", results={...})
    """
    for callback in self._callbacks:
        method = getattr(callback, hook_name, None)
        if method is None:
            # Callbacks may implement only a subset of hooks.
            continue
        if not callable(method):
            # A non-callable attribute with the hook's name is almost certainly a mistake.
            rank_zero_warn(
                f"Skipping the callback `{type(callback).__name__}.{hook_name}` because it is not callable."
            )
            continue
        # Drop kwargs this particular callback does not accept so different callbacks can
        # declare different signatures for the same hook.
        filtered_kwargs = self._filter_kwargs_for_callback(method, kwargs)
        method(*args, **filtered_kwargs)
def log(self, name: str, value: Any, step: Optional[int] = None) -> None:
    """Log a single scalar metric to every logger added to Fabric.

    Args:
        name: Name under which the metric is recorded.
        value: The value to record. Tensors are detached from the graph automatically.
        step: Optional explicit step number; by default loggers auto-increment the step
            with every log call.
    """
    # Delegate to log_dict so tensor-to-scalar conversion happens in a single place.
    self.log_dict(metrics={name: value}, step=step)
def log_dict(self, metrics: Mapping[str, Any], step: Optional[int] = None) -> None:
    """Log multiple scalars at once to every logger added to Fabric.

    Args:
        metrics: Mapping from metric name to the scalar to be logged. Any :class:`torch.Tensor`
            values are detached from the graph automatically.
        step: Optional explicit step number; by default loggers auto-increment the step
            with every log call.
    """
    scalar_metrics = convert_tensors_to_scalars(metrics)
    for logger in self._loggers:
        logger.log_metrics(metrics=scalar_metrics, step=step)
@staticmethod
def seed_everything(seed: Optional[int] = None, workers: Optional[bool] = None, verbose: bool = True) -> int:
    r"""Helper function to seed everything without explicitly importing Lightning.

    See :func:`~lightning.fabric.utilities.seed.seed_everything` for more details.
    """
    # Unlike the Trainer, Fabric defaults `workers` to True: being a newer API it can
    # afford the stricter default without breaking reproducibility of existing runs.
    effective_workers = True if workers is None else workers
    return seed_everything(seed=seed, workers=effective_workers, verbose=verbose)
def _wrap_and_launch(self, to_run: Callable, *args: Any, **kwargs: Any) -> Any:
    # Shared entry point for `launch()` and the wrapped `run()` method: mark this Fabric as
    # launched and hand the callable to the strategy's launcher (if it has one).
    self._launched = True
    # Make sure environment/dataloader-capture setup runs inside each worker process.
    to_run = partial(self._wrap_with_setup, to_run)
    if (launcher := self._strategy.launcher) is not None:
        # Spawn/fork-based strategies create the worker processes here.
        return launcher.launch(to_run, *args, **kwargs)
    # No launcher: execute directly in the current process.
    return to_run(*args, **kwargs)
def _wrap_with_setup(self, to_run: Callable, *args: Any, **kwargs: Any) -> Any:
    # Runs inside the (possibly spawned) worker process: initialize the distributed environment first.
    self._strategy.setup_environment()
    # Patch DataLoader/BatchSampler dunders so user-created dataloaders record their init arguments,
    # which `setup_dataloaders` later needs to re-instantiate them with a distributed sampler.
    with _replace_dunder_methods(DataLoader, "dataset"), _replace_dunder_methods(BatchSampler):
        return to_run(*args, **kwargs)
def _move_model_to_device(self, model: nn.Module, optimizers: list[Optimizer]) -> nn.Module:
    # Move the model to the target device, warning if its parameters were spread across devices,
    # and (for XLA) re-point optimizer param references at the copied device parameters.
    try:
        initial_name, initial_param = next(model.named_parameters())
    except StopIteration:
        # Model has no parameters; nothing to inspect.
        pass
    else:
        # Count parameters that live on a different device than the first one, to warn the user
        # that `move_to_device=True` will collapse them all onto a single device.
        initial_device = initial_param.device
        count = 0
        first_name, first_device = None, None
        for name, param in model.named_parameters():
            if param.device != initial_device:
                count += 1
                if first_name is None:
                    first_name = name
                    first_device = param.device
        if count > 0:
            rank_zero_warn(
                f"The model passed to `Fabric.setup()` has {count} parameters on different devices (for example"
                f" {first_name!r} on {first_device} and {initial_name!r} on {initial_device}). Since"
                " `move_to_device=True`, all parameters will be moved to the new device. If this is not"
                " desired, set `Fabric.setup(..., move_to_device=False)`.",
                category=PossibleUserWarning,
            )
    if isinstance(self._strategy, XLAStrategy):
        # When the user creates the optimizer, they reference the parameters on the CPU.
        # However, when running with TPU the parameters get copied and the reference in the optimizer
        # remains invalid. We need to update the references to point to the parameter tensors on the device.
        params_before_move = dict(model.named_parameters())
        model = self.to_device(model)
        # XLA makes a copy on the parameters, so the device is not the same before and after to_device.
        params_on_device = dict(model.named_parameters())

        # Map each stale CPU parameter to its on-device copy, then rewrite the optimizer groups.
        mapping = {param: params_on_device[name] for name, param in params_before_move.items()}
        for optimizer in optimizers:
            for param_group in optimizer.param_groups:
                param_group["params"] = [mapping.get(p, p) for p in param_group["params"]]
    else:
        model = self.to_device(model)
    return model
def _requires_distributed_sampler(self, dataloader: DataLoader) -> bool:
    """Whether Fabric should inject a ``DistributedSampler`` into this dataloader."""
    if getattr(self.strategy, "distributed_sampler_kwargs", None) is None:
        # Strategy is not distributed (or provides no sampler configuration).
        return False
    if isinstance(dataloader.sampler, DistributedSampler):
        # User already configured distributed sampling themselves.
        return False
    # Iterable datasets do their own sharding; map-style datasets need the sampler.
    return not has_iterable_dataset(dataloader)
@staticmethod
def _get_distributed_sampler(dataloader: DataLoader, **kwargs: Any) -> DistributedSampler:
kwargs.setdefault("shuffle", isinstance(dataloader.sampler, RandomSampler))
kwargs.setdefault("seed", int(os.getenv("PL_GLOBAL_SEED", 0)))
if isinstance(dataloader.sampler, (RandomSampler, SequentialSampler)):
return DistributedSampler(dataloader.dataset, **kwargs)
return DistributedSamplerWrapper(dataloader.sampler, **kwargs)
def _prepare_run_method(self) -> None:
    # A subclass overriding `run()` relies on Fabric wrapping/launching it, which conflicts with
    # the CLI having already created the worker processes — forbid the combination.
    if is_overridden("run", self, Fabric) and _is_using_cli():
        raise TypeError(
            "Overriding `Fabric.run()` and launching from the CLI is not allowed. Run the script normally,"
            " or change your code to directly call `fabric = Fabric(...); fabric.setup(...)` etc."
        )
    # wrap the run method, so we can inject setup logic or spawn processes for the user
    setattr(self, "run", partial(self._wrap_and_launch, self.run))
def _validate_launched(self) -> None:
    """Raise if a multi-device strategy is used without processes having been created yet."""
    if self._launched:
        return
    if isinstance(self._strategy, (SingleDeviceStrategy, DataParallelStrategy)):
        # These strategies run entirely in the current process and need no launcher.
        return
    raise RuntimeError(
        "To use Fabric with more than one device, you must call `.launch()` or use the CLI:"
        " `fabric run --help`."
    )
def _validate_setup(self, module: nn.Module, optimizers: Sequence[Optimizer]) -> None:
    """Validate the arguments to :meth:`setup`.

    Raises:
        ValueError: If the model or any optimizer was already set up (passed through Fabric before).
        RuntimeError: If using FSDP and an optimizer references the model's meta-device parameters.
    """
    self._validate_launched()
    if isinstance(module, _FabricModule):
        raise ValueError("A model should be passed only once to the `setup` method.")

    if any(isinstance(opt, _FabricOptimizer) for opt in optimizers):
        raise ValueError("An optimizer should be passed only once to the `setup` method.")

    if isinstance(self._strategy, FSDPStrategy) and any(
        _has_meta_device_parameters_or_buffers(optimizer) for optimizer in optimizers
    ):
        # FSDP materializes meta-device parameters during module setup; an optimizer created
        # beforehand would hold stale references, so joint setup cannot work here.
        # (Message fixed: previously read "Materializing them is is ... unless you to set up".)
        raise RuntimeError(
            "The optimizer has references to the model's meta-device parameters. Materializing them"
            " is currently not supported unless you set up the model and optimizer(s) separately."
            " Create and set up the model first through `model = fabric.setup_module(model)`. Then create the"
            " optimizer and set it up: `optimizer = fabric.setup_optimizers(optimizer)`."
        )
def _validate_setup_module(self, module: nn.Module) -> None:
    # Validate the argument to `setup_module`: a _FabricModule is the result of a previous
    # setup call, so passing it again would double-wrap the model.
    self._validate_launched()
    if isinstance(module, _FabricModule):
        raise ValueError("A model should be passed only once to the `setup_module` method.")
def _validate_setup_optimizers(self, optimizers: Sequence[Optimizer]) -> None:
    """Validate the arguments to :meth:`setup_optimizers`.

    Raises:
        RuntimeError: If the strategy requires joint model/optimizer setup, or if an optimizer
            references meta-device parameters.
        ValueError: If no optimizer was given or one was already set up.
    """
    self._validate_launched()
    if isinstance(self._strategy, (DeepSpeedStrategy, XLAStrategy)):
        # These strategies wrap model and optimizer together and cannot set up optimizers alone.
        raise RuntimeError(
            f"The `{type(self._strategy).__name__}` requires the model and optimizer(s) to be set up jointly"
            " through `.setup(model, optimizer, ...)`."
        )

    if not optimizers:
        raise ValueError("`setup_optimizers` requires at least one optimizer as input.")

    if any(isinstance(opt, _FabricOptimizer) for opt in optimizers):
        raise ValueError("An optimizer should be passed only once to the `setup_optimizers` method.")

    if any(_has_meta_device_parameters_or_buffers(optimizer) for optimizer in optimizers):
        # (Message fixed: previously contained a duplicated "is is".)
        raise RuntimeError(
            "The optimizer has references to the model's meta-device parameters. Materializing them"
            " is currently not supported. Create the optimizer after setting up the model, then call"
            " `fabric.setup_optimizers(optimizer)`."
        )
def _validate_setup_dataloaders(self, dataloaders: Sequence[DataLoader]) -> None:
    # Validate the arguments to `setup_dataloaders`: at least one plain (not yet wrapped)
    # PyTorch DataLoader must be given.
    self._validate_launched()
    if not dataloaders:
        raise ValueError("`setup_dataloaders` requires at least one dataloader as input.")

    if any(isinstance(dl, _FabricDataLoader) for dl in dataloaders):
        # A _FabricDataLoader is the result of a previous setup call.
        raise ValueError("A dataloader should be passed only once to the `setup_dataloaders` method.")

    if any(not isinstance(dl, DataLoader) for dl in dataloaders):
        raise TypeError("Only PyTorch DataLoader are currently supported in `setup_dataloaders`.")
@staticmethod
def _configure_callbacks(callbacks: Optional[Union[list[Any], Any]]) -> list[Any]:
    """Normalize the user-provided callbacks to a list and append externally registered ones.

    External callbacks are discovered via the ``lightning.fabric.callbacks_factory`` entry point.
    """
    if callbacks is None:
        result: list[Any] = []
    elif isinstance(callbacks, list):
        # Keep the user's list object (it is extended in place, matching prior behavior).
        result = callbacks
    else:
        result = [callbacks]
    result.extend(_load_external_callbacks("lightning.fabric.callbacks_factory"))
    return result
| Fabric |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gcs/source_gcs/config.py | {
"start": 348,
"end": 1059
} | class ____(BaseModel):
class Config(OneOfOptionConfig):
title = "Authenticate via Google (OAuth)"
auth_type: Literal["Client"] = Field("Client", const=True)
client_id: str = Field(
title="Client ID",
description="Client ID",
airbyte_secret=True,
)
client_secret: str = Field(
title="Client Secret",
description="Client Secret",
airbyte_secret=True,
)
access_token: str = Field(
title="Access Token",
description="Access Token",
airbyte_secret=True,
)
refresh_token: str = Field(
    title="Refresh Token",
    description="Refresh Token",
    airbyte_secret=True,
)
| OAuthCredentials |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 5170,
"end": 8391
} | class ____:
def test_init_defaults(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
assert e.document == doc
assert e.setter is None
assert e.callback_invoker is None
assert e.model == "model"
assert e.attr == "attr"
assert e.new == "new"
assert e.callback_invoker is None
def test_kind(self) -> None:
assert bde.ModelChangedEvent.kind == "ModelChanged"
# TODO (bev) tests for generate
def test_dispatch(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_document_model_changed']
def test_combine_ignores_except_title_changd_event(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) is False
def test_combine_ignores_different_setter(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new", "setter")
e2 = bde.ModelChangedEvent(doc, "model", "attr", "new2", "setter2")
assert e.combine(e2) is False
def test_combine_ignores_different_doc(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent("doc2", "model", "attr", "new2")
assert e.combine(e2) is False
def test_combine_ignores_different_model(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent(doc, "model2", "attr", "new2")
assert e.combine(e2) is False
def test_combine_ignores_different_attr(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent(doc, "model", "attr2", "new2")
assert e.combine(e2) is False
def test_combine_with_matching_model_changed_event(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new", callback_invoker="invoker")
e2 = bde.ModelChangedEvent(doc, "model", "attr", "new2", callback_invoker="invoker2")
assert e.combine(e2) is True
assert e.new == "new2"
assert e.callback_invoker == "invoker2"
@patch("bokeh.document.events.ColumnsStreamedEvent.combine")
def test_combine_with_defers(self, mock_combine: MagicMock) -> None:
mock_combine.return_value = False
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
e2 = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=2), 300, "setter", "invoker")
assert e.combine(e2) is False
assert mock_combine.call_count == 1
assert mock_combine.call_args[0] == (e2,)
assert mock_combine.call_args[1] == {}
# ColumnDataChangedEvent ------------------------------------------------------
| TestModelChangedEvent |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_sentinels.py | {
"start": 335,
"end": 627
} | class ____:
"""A special value for :exclude-members: that never matches to any member."""
def __contains__(self, item: Any) -> bool:
return False
ALL = _All()
EMPTY = _Empty()
UNINITIALIZED_ATTR = object()
INSTANCEATTR = object()
SLOTSATTR = object()
SUPPRESS = object()
| _Empty |
python | python__mypy | mypy/nodes.py | {
"start": 40030,
"end": 46975
} | class ____(SymbolNode):
"""A variable.
It can refer to global/local variable or a data attribute.
"""
__slots__ = (
"_name",
"_fullname",
"info",
"type",
"setter_type",
"final_value",
"is_self",
"is_cls",
"is_ready",
"is_inferred",
"is_initialized_in_class",
"is_staticmethod",
"is_classmethod",
"is_property",
"is_settable_property",
"is_classvar",
"is_abstract_var",
"is_final",
"is_index_var",
"final_unset_in_class",
"final_set_in_init",
"is_suppressed_import",
"explicit_self_type",
"from_module_getattr",
"has_explicit_value",
"allow_incompatible_override",
"invalid_partial_type",
)
__match_args__ = ("name", "type", "final_value")
def __init__(self, name: str, type: mypy.types.Type | None = None) -> None:
super().__init__()
self._name = name # Name without module prefix
# TODO: Should be Optional[str]
self._fullname = "" # Name with module prefix
# TODO: Should be Optional[TypeInfo]
self.info = VAR_NO_INFO
self.type: mypy.types.Type | None = type # Declared or inferred type, or None
# The setter type for settable properties.
self.setter_type: mypy.types.CallableType | None = None
# Is this the first argument to an ordinary method (usually "self")?
self.is_self = False
# Is this the first argument to a classmethod (typically "cls")?
self.is_cls = False
self.is_ready = True # If inferred, is the inferred type available?
self.is_inferred = self.type is None
# Is this initialized explicitly to a non-None value in class body?
self.is_initialized_in_class = False
self.is_staticmethod = False
self.is_classmethod = False
self.is_property = False
self.is_settable_property = False
self.is_classvar = False
self.is_abstract_var = False
self.is_index_var = False
# Set to true when this variable refers to a module we were unable to
# parse for some reason (eg a silenced module)
self.is_suppressed_import = False
# Was this "variable" (rather a constant) defined as Final[...]?
self.is_final = False
# If constant value is a simple literal,
# store the literal value (unboxed) for the benefit of
# tools like mypyc.
self.final_value: int | float | complex | bool | str | None = None
# Where the value was set (only for class attributes)
self.final_unset_in_class = False
self.final_set_in_init = False
# This is True for a variable that was declared on self with an explicit type:
# class C:
# def __init__(self) -> None:
# self.x: int
# This case is important because this defines a new Var, even if there is one
# present in a superclass (without explicit type this doesn't create a new Var).
# See SemanticAnalyzer.analyze_member_lvalue() for details.
self.explicit_self_type = False
# If True, this is an implicit Var created due to module-level __getattr__.
self.from_module_getattr = False
# Var can be created with an explicit value `a = 1` or without one `a: int`,
# we need a way to tell which one is which.
self.has_explicit_value = False
# If True, subclasses can override this with an incompatible type.
self.allow_incompatible_override = False
# If True, this means we didn't manage to infer full type and fall back to
# something like list[Any]. We may decide to not use such types as context.
self.invalid_partial_type = False
@property
def name(self) -> str:
return self._name
@property
def fullname(self) -> str:
return self._fullname
def __repr__(self) -> str:
name = self.fullname or self.name
return f"<Var {name!r} at {hex(id(self))}>"
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_var(self)
def serialize(self) -> JsonDict:
# TODO: Leave default values out?
# NOTE: Sometimes self.is_ready is False here, but we don't care.
data: JsonDict = {
".class": "Var",
"name": self._name,
"fullname": self._fullname,
"type": None if self.type is None else self.type.serialize(),
"setter_type": None if self.setter_type is None else self.setter_type.serialize(),
"flags": get_flags(self, VAR_FLAGS),
}
if self.final_value is not None:
data["final_value"] = self.final_value
return data
@classmethod
def deserialize(cls, data: JsonDict) -> Var:
assert data[".class"] == "Var"
name = data["name"]
type = None if data["type"] is None else mypy.types.deserialize_type(data["type"])
setter_type = (
None
if data["setter_type"] is None
else mypy.types.deserialize_type(data["setter_type"])
)
v = Var(name, type)
assert (
setter_type is None
or isinstance(setter_type, mypy.types.ProperType)
and isinstance(setter_type, mypy.types.CallableType)
)
v.setter_type = setter_type
v.is_ready = False # Override True default set in __init__
v._fullname = data["fullname"]
set_flags(v, data["flags"])
v.final_value = data.get("final_value")
return v
def write(self, data: WriteBuffer) -> None:
write_tag(data, VAR)
write_str(data, self._name)
mypy.types.write_type_opt(data, self.type)
mypy.types.write_type_opt(data, self.setter_type)
write_str(data, self._fullname)
write_flags(data, self, VAR_FLAGS)
write_literal(data, self.final_value)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> Var:
name = read_str(data)
typ = mypy.types.read_type_opt(data)
v = Var(name, typ)
setter_type: mypy.types.CallableType | None = None
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == mypy.types.CALLABLE_TYPE
setter_type = mypy.types.CallableType.read(data)
v.setter_type = setter_type
v.is_ready = False # Override True default set in __init__
v._fullname = read_str(data)
read_flags(data, v, VAR_FLAGS)
tag = read_tag(data)
if tag == LITERAL_COMPLEX:
v.final_value = complex(read_float_bare(data), read_float_bare(data))
elif tag != LITERAL_NONE:
v.final_value = read_literal(data, tag)
assert read_tag(data) == END_TAG
return v
| Var |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/record/__init__.py | {
"start": 13176,
"end": 16822
} | class ____:
"""Marker class to be used when overriding new in @record_custom classes to prevent
type errors when calling super().__new__.
"""
if TYPE_CHECKING:
def __new__(cls, **kwargs) -> Self: ...
# let type checker know these objects are sortable (by way of being a namedtuple)
def __lt__(self, other) -> bool: ...
def has_generated_new(obj) -> bool:
return obj.__new__.__name__ in (_DEFAULTS_NEW, _CHECKED_NEW)
def get_record_annotations(obj) -> Mapping[str, type]:
check.invariant(is_record(obj), "Only works for @record decorated classes")
return getattr(obj, _RECORD_ANNOTATIONS_FIELD)
def get_record_defaults(obj) -> Mapping[str, Any]:
check.invariant(is_record(obj), "Only works for @record decorated classes")
return getattr(obj, _RECORD_DEFAULTS_FIELD)
def get_original_class(obj):
check.invariant(is_record(obj), "Only works for @record decorated classes")
return getattr(obj, _ORIGINAL_CLASS_FIELD)
def as_dict(obj) -> Mapping[str, Any]:
"""Creates a dict representation of the record based on the fields."""
check.invariant(is_record(obj), "Only works for @record decorated classes")
return {key: value for key, value in zip(obj._fields, obj.__hidden_iter__())}
def as_dict_for_new(obj) -> Mapping[str, Any]:
"""Creates a dict representation of the record with field_to_new_mapping applied."""
check.invariant(is_record(obj), "Only works for @record decorated classes")
remap = getattr(obj, _REMAPPING_FIELD)
from_obj = {}
for k, v in as_dict(obj).items():
if k in remap:
from_obj[remap[k]] = v
else:
from_obj[k] = v
return from_obj
def copy(record: TVal, **kwargs) -> TVal:
"""Create a new instance of this record type using its constructor with
the original records values overrode with new values specified as key args.
"""
return record.__class__(
**{
**as_dict_for_new(record),
**kwargs,
}
)
def replace(obj: TVal, **kwargs) -> TVal:
"""Create a new instance of this record type using the record constructor directly,
(bypassing any custom __new__ impl) with the original records values overrode with
new values specified by keyword args.
This emulates the behavior of namedtuple _replace.
Example:
@record
class Config:
timeout: int
retries: int
config = Config(timeout=30, retries=3)
# Update single field
faster_config = replace(config, timeout=10)
# Update multiple fields
production_config = replace(config, timeout=60, retries=5)
# Add items to lists (use spread operator to avoid mutation)
@record
class Results:
errors: list[str]
warnings: list[str]
results = Results(errors=[], warnings=["deprecated API"])
# Add new error
updated_results = replace(results, errors=[*results.errors, "network timeout"])
# Add multiple items
final_results = replace(
updated_results,
errors=[*updated_results.errors, "connection failed"],
warnings=[*updated_results.warnings, "slow query"]
)
"""
check.invariant(is_record(obj))
cls = obj.__class__
# if we have runtime type checking, go through that to vet new field values
if hasattr(cls, _CHECKED_NEW):
target = _CHECKED_NEW
else:
target = _NAMED_TUPLE_BASE_NEW_FIELD
return getattr(cls, target)(
obj.__class__,
**{**as_dict(obj), **kwargs},
)
| IHaveNew |
python | django__django | django/core/serializers/base.py | {
"start": 7162,
"end": 12645
} | class ____:
"""
A deserialized model.
Basically a container for holding the pre-saved deserialized data along
with the many-to-many data saved with the object.
Call ``save()`` to save the object (with the many-to-many data) to the
database; call ``save(save_m2m=False)`` to save just the object fields
(and not touch the many-to-many stuff.)
"""
def __init__(self, obj, m2m_data=None, deferred_fields=None):
self.object = obj
self.m2m_data = m2m_data
self.deferred_fields = deferred_fields
def __repr__(self):
return "<%s: %s(pk=%s)>" % (
self.__class__.__name__,
self.object._meta.label,
self.object.pk,
)
def save(self, save_m2m=True, using=None, **kwargs):
# Call save on the Model baseclass directly. This bypasses any
# model-defined save. The save is also forced to be raw.
# raw=True is passed to any pre/post_save signals.
models.Model.save_base(self.object, using=using, raw=True, **kwargs)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
getattr(self.object, accessor_name).set(object_list)
# prevent a second (possibly accidental) call to save() from saving
# the m2m data twice.
self.m2m_data = None
def save_deferred_fields(self, using=None):
self.m2m_data = {}
for field, field_value in self.deferred_fields.items():
opts = self.object._meta
label = opts.app_label + "." + opts.model_name
if isinstance(field.remote_field, models.ManyToManyRel):
try:
values = deserialize_m2m_values(
field, field_value, using, handle_forward_references=False
)
except M2MDeserializationError as e:
raise DeserializationError.WithData(
e.original_exc, label, self.object.pk, e.pk
)
self.m2m_data[field.name] = values
elif isinstance(field.remote_field, models.ManyToOneRel):
try:
value = deserialize_fk_value(
field, field_value, using, handle_forward_references=False
)
except Exception as e:
raise DeserializationError.WithData(
e, label, self.object.pk, field_value
)
setattr(self.object, field.attname, value)
self.save()
def build_instance(Model, data, db):
"""
Build a model instance.
If the model instance doesn't have a primary key and the model supports
natural keys, try to retrieve it from the database.
"""
default_manager = Model._meta.default_manager
pk = data.get(Model._meta.pk.attname)
if (
pk is None
and hasattr(default_manager, "get_by_natural_key")
and hasattr(Model, "natural_key")
):
obj = Model(**data)
obj._state.db = db
natural_key = obj.natural_key()
try:
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
default_manager.db_manager(db).get_by_natural_key(*natural_key).pk
)
except Model.DoesNotExist:
pass
return Model(**data)
def deserialize_m2m_values(field, field_value, using, handle_forward_references):
model = field.remote_field.model
if hasattr(model._default_manager, "get_by_natural_key"):
def m2m_convert(value):
if hasattr(value, "__iter__") and not isinstance(value, str):
return (
model._default_manager.db_manager(using)
.get_by_natural_key(*value)
.pk
)
else:
return model._meta.pk.to_python(value)
else:
def m2m_convert(v):
return model._meta.pk.to_python(v)
try:
pks_iter = iter(field_value)
except TypeError as e:
raise M2MDeserializationError(e, field_value)
try:
values = []
for pk in pks_iter:
values.append(m2m_convert(pk))
return values
except Exception as e:
if isinstance(e, ObjectDoesNotExist) and handle_forward_references:
return DEFER_FIELD
else:
raise M2MDeserializationError(e, pk)
def deserialize_fk_value(field, field_value, using, handle_forward_references):
if field_value is None:
return None
model = field.remote_field.model
default_manager = model._default_manager
field_name = field.remote_field.field_name
if (
hasattr(default_manager, "get_by_natural_key")
and hasattr(field_value, "__iter__")
and not isinstance(field_value, str)
):
try:
obj = default_manager.db_manager(using).get_by_natural_key(*field_value)
except ObjectDoesNotExist:
if handle_forward_references:
return DEFER_FIELD
else:
raise
value = getattr(obj, field_name)
# If this is a natural foreign key to an object that has a FK/O2O as
# the foreign key, use the FK value.
if model._meta.pk.remote_field:
value = value.pk
return value
return model._meta.get_field(field_name).to_python(field_value)
| DeserializedObject |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_batch.py | {
"start": 6740,
"end": 8287
} | class ____(GoogleBaseHook):
"""
Async hook for the Google Cloud Batch service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
self._client: BatchServiceAsyncClient | None = None
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs)
def get_conn(self):
if self._client is None:
self._client = BatchServiceAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._client
async def get_batch_job(
self,
job_name: str,
) -> Job:
client = self.get_conn()
return await client.get_job(name=f"{job_name}")
| CloudBatchAsyncHook |
python | justquick__django-activity-stream | actstream/drf/serializers.py | {
"start": 3390,
"end": 3635
} | class ____(DEFAULT_SERIALIZER):
"""
Serializer for actstream.Follow models in the "following" activity feeds
"""
follow_object = get_grf()
class Meta:
model = Follow
fields = ['follow_object']
| FollowingSerializer |
python | pytorch__pytorch | torch/_higher_order_ops/wrap.py | {
"start": 6859,
"end": 14663
} | class ____(HigherOrderOperator):
"""
This operator is supposed to be used only with torch.compile stack. This
accepts a Fx graph module which needs to be checkpointed. This operator adds
"recomputable" tag to the nodes of the Fx graph that should be recomputed.
The goal is to:
1. Avoid using Dynamo to trace through saved tensor hooks.
2. For selective checkpointing case, let AOTAutograd trace through
saved tensor hooks but has special logic with TorchDispatchMode to override
the usual saved_tensor_hooks fn logic in order to tag the nodes.
3. Rely on the partitioners to actually duplicate the nodes.
This sits well in the torch.compile stack, because by the time graph
reaches partitioner, inductor has already run its functionalization of rng
ops (by setting fixed seed for each random op, see `replace_random_passes`).
Therefore, the duplication of nodes, by design, respects the rng states in
the forward and recomputed forward in backward.
"""
def __init__(self) -> None:
super().__init__("tag_activation_checkpoint", cacheable=False)
@staticmethod
def divide_kwargs(kwargs):
"""
checkpoint fn can have mixed kwargs between checkpointed fn and
checkpoint fn itself. For example
>> def gn(x, y, z=None):
>> a = torch.matmul(x, y)
>> if z is not None:
>> return torch.matmul(a, z)
>> return a
>> def fn(x, y, z):
>> return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z))
In the above case, z belongs to checkpointed function gn, but
use_reentrant belongs to the checkpoint function. This function splits
the kwargs into checkpoint_kwargs and gmod_kwargs (or
checkpointed_fn_kwargs).
We do sorting to ensure same graph from run to run for better
debuggability. It is not required for correctness.
"""
from torch.utils.checkpoint import checkpoint
ckpt_signature = inspect.signature(checkpoint)
checkpoint_keys = set()
for name in ckpt_signature.parameters:
if name in ("function", "args", "kwargs"):
continue
checkpoint_keys.add(name)
# `preserve_rng_state` is not a regular kwarg
checkpoint_keys.add("preserve_rng_state")
checkpoint_kwargs = {
name: kwargs[name] for name in kwargs if name in checkpoint_keys
}
gmod_kwargs = {
name: kwargs[name] for name in kwargs if name not in checkpoint_keys
}
return checkpoint_kwargs, gmod_kwargs
@staticmethod
def tag_nodes(gmod, is_sac):
from torch.utils.checkpoint import CheckpointPolicy
unique_graph_id = next(uid)
for node in gmod.graph.nodes:
if node.op in ("call_function", "call_method", "call_module"):
node.meta["ac_graph_id"] = unique_graph_id
if is_sac:
# For selective checkpointing, we will populate this tag later in _CachingTorchDispatchMode.
node.meta["recompute"] = None
else:
# Under vanilla activation checkpointing, all nodes should be recomputed.
node.meta["recompute"] = CheckpointPolicy.PREFER_RECOMPUTE
return gmod
def __call__(self, gmod, *args, **kwargs):
dispatch_key_set = torch._ops._compute_keyset(
args, kwargs, self.non_fallthrough_keys
)
dispatch_key = dispatch_key_set.highestPriorityTypeId()
if dispatch_key == torch._C.DispatchKey.PreDispatch:
return super().__call__(gmod, *args, **kwargs)
return tag_activation_checkpoint_impl(gmod, *args, **kwargs)
tag_activation_checkpoint = TagActivationCheckpoint()
def tag_activation_checkpoint_impl(gmod, *args, **kwargs):
import torch.fx.traceback as fx_traceback
from torch.fx import Interpreter
if "_checkpoint_context_fn" in gmod.meta:
warning_once(
log,
"""
Detected that context_fn is passed to torch.utils.checkpoint under torch.compile.
Please make sure the checkpointed region does not contain in-place ops (e.g. torch.relu_).
""",
)
# use_reentrant is set to False because this op is going to be traced.
# And we ensure that AOT Autograd traces through the non reentrant
# version of checkpointing.
kwargs["use_reentrant"] = False
# preserve_rng_state is set to False because we want to prevent AOTAutograd from tracing through
# `torch.random.fork_rng` op (which is not supported yet under CUDA).
# This doesn't mean that we don't preserve RNG state. Instead, we will always preserve RNG state
# regardless of this flag (by doing RNG functionalization via `replace_random_passes` in Inductor
# instead of in AOTAutograd).
kwargs["preserve_rng_state"] = False
kwargs["context_fn"] = gmod.meta["_checkpoint_context_fn"]
# We first tag all nodes as "recompute" in this graph, and then we undo the "recompute" tag
# for specific nodes in _CachingTorchDispatchMode in torch/utils/checkpoint.py.
gmod = TagActivationCheckpoint.tag_nodes(gmod, is_sac=True)
# Using interpreter allows preservation of metadata through torch.compile stack.
with fx_traceback.preserve_node_meta():
from torch.utils.checkpoint import checkpoint
return checkpoint(Interpreter(gmod).run, *args, **kwargs)
else:
gmod = TagActivationCheckpoint.tag_nodes(gmod, is_sac=False)
# Using interpreter allows preservation of metadata through torch.compile stack.
# TODO: We want to use the same `checkpoint(Interpreter(gmod).run, *args, **kwargs)` here
# as the `context_fn != None` case, but that depends on in-place op support in TorchDispatchMode + torch.compile.
# (for details on in-place op issue, run `test_compile_selective_checkpoint_inplace_op` unit test)
with fx_traceback.preserve_node_meta():
return Interpreter(gmod).run(*args)
@tag_activation_checkpoint.py_impl(ProxyTorchDispatchMode)
def proxy_mode_key(
proxy_mode: ProxyTorchDispatchMode,
gmod: GraphModule,
*args: Any,
**kwargs: Any,
) -> tuple[torch.Tensor]:
import torch.fx.traceback as fx_traceback
from torch.fx import Interpreter
assert proxy_mode.pre_dispatch, (
"post-dispatch mode should have inlined in the Autograd key"
)
example_out = tag_activation_checkpoint(gmod, *args, **kwargs)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) # type: ignore[union-attr]
proxy_kwargs = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, kwargs) # type: ignore[union-attr]
qualname = proxy_mode.tracer.get_fresh_qualname("wrap_body") # type: ignore[union-attr]
# TODO (tmanlaibaatar) don't we need flat_apply here??
# Dynamo already traced the gmod body without kwargs
flat_args, _ = pytree.tree_flatten(args)
with fx_traceback.preserve_node_meta():
gmod_aten = reenter_make_fx(Interpreter(gmod).run)(*flat_args)
gmod_aten.meta["_checkpoint_context_fn"] = gmod.meta["_checkpoint_context_fn"]
proxy_mode.tracer.root.register_module(qualname, gmod_aten) # type: ignore[union-attr]
proxy_gmod = proxy_mode.tracer.unwrap_proxy(gmod_aten) # type: ignore[union-attr, call-overload]
out_proxy = proxy_mode.tracer.create_proxy(
"call_function",
tag_activation_checkpoint,
(proxy_gmod, *proxy_args),
proxy_kwargs,
)
return track_tensor_tree(
example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
)
| TagActivationCheckpoint |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/call_to_named_tuple_test.py | {
"start": 1032,
"end": 1227
} | class ____(collections.namedtuple('TestNamedTuple', ('a',))):
def foo(self):
return self.a + 1
def namedtuple_subclass(x):
nt = NamedTupleSubclass(x)
return nt.foo()
| NamedTupleSubclass |
python | pypa__warehouse | tests/unit/legacy/api/xmlrpc/test_cache.py | {
"start": 945,
"end": 2598
} | class ____:
def test_redis_cache(self, monkeypatch):
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url, db=None: strict_redis_obj)
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
redis_lru_obj = pretend.stub(
fetch=pretend.call_recorder(
lambda func, args, kwargs, key, tag, expires: func(*args, **kwargs)
),
purge=pretend.call_recorder(lambda tag: None),
)
redis_lru_cls = pretend.call_recorder(
lambda redis_conn, **kwargs: redis_lru_obj
)
monkeypatch.setattr(
warehouse.legacy.api.xmlrpc.cache, "RedisLru", redis_lru_cls
)
purger = pretend.call_recorder(lambda tags: None)
service = RedisXMLRPCCache("redis://localhost:6379", purger)
assert strict_redis_cls.from_url.calls == [
pretend.call("redis://localhost:6379", db=0)
]
assert redis_lru_cls.calls == [
pretend.call(
strict_redis_obj, name="lru", expires=None, metric_reporter=None
)
]
assert service.fetch(
func_test, (1, 2), {"kwarg0": 3, "kwarg1": 4}, None, None, None
) == [[1, 2], {"kwarg0": 3, "kwarg1": 4}]
assert service.purge(None) is None
assert redis_lru_obj.fetch.calls == [
pretend.call(
func_test, (1, 2), {"kwarg0": 3, "kwarg1": 4}, None, None, None
)
]
assert redis_lru_obj.purge.calls == [pretend.call(None)]
| TestRedisXMLRPCCache |
python | jazzband__django-oauth-toolkit | oauth2_provider/admin.py | {
"start": 1688,
"end": 2724
} | class ____(admin.ModelAdmin):
list_display = ("token", "user", "application")
raw_id_fields = ("user", "access_token")
search_fields = ("token",) + (("user__email",) if has_email else ())
list_filter = ("application",)
application_model = get_application_model()
access_token_model = get_access_token_model()
grant_model = get_grant_model()
id_token_model = get_id_token_model()
refresh_token_model = get_refresh_token_model()
application_admin_class = get_application_admin_class()
access_token_admin_class = get_access_token_admin_class()
grant_admin_class = get_grant_admin_class()
id_token_admin_class = get_id_token_admin_class()
refresh_token_admin_class = get_refresh_token_admin_class()
admin.site.register(application_model, application_admin_class)
admin.site.register(access_token_model, access_token_admin_class)
admin.site.register(grant_model, grant_admin_class)
admin.site.register(id_token_model, id_token_admin_class)
admin.site.register(refresh_token_model, refresh_token_admin_class)
| RefreshTokenAdmin |
python | django-extensions__django-extensions | tests/management/commands/test_sqlcreate.py | {
"start": 1166,
"end": 5200
} | class ____(TestCase):
"""Tests for sqlcreate command."""
@override_settings(DATABASES={"default": MYSQL_DATABASE_SETTINGS})
@patch("sys.stderr", new_callable=StringIO)
@patch("sys.stdout", new_callable=StringIO)
@patch("django_extensions.management.commands.sqlcreate.socket")
def test_should_print_SQL_create_database_statement_for_mysql(
self, m_socket, m_stdout, m_stderr
):
m_socket.gethostname.return_value = "tumbleweed"
expected_error = """-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
"""
expected_statement = """CREATE DATABASE dbatabase CHARACTER SET utf8 COLLATE utf8_bin;
GRANT ALL PRIVILEGES ON dbatabase.* to 'foo'@'tumbleweed' identified by 'bar';
"""
call_command("sqlcreate")
self.assertEqual(expected_statement, m_stdout.getvalue())
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={"default": POSTGRESQL_DATABASE_SETTINGS})
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_SQL_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command("sqlcreate")
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={"default": POSTGRESQL_DATABASE_SETTINGS_SOCKET_MODE})
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_SQL_create_database_statement_only_for_postgresql_when_unix_domain_socket_mode_is_used(
self, m_stdout
):
expected_statement = """-- Assuming that unix domain socket connection mode is being used because
-- USER or PASSWORD are blank in Django DATABASES configuration.
CREATE DATABASE database WITH ENCODING 'UTF-8';
"""
call_command("sqlcreate")
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={"default": POSTGRESQL_DATABASE_SETTINGS})
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_SQL_drop_and_create_database_statement_for_postgresql(
self, m_stdout
):
expected_statement = """DROP DATABASE IF EXISTS database;
DROP USER IF EXISTS foo;
CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command("sqlcreate", "--drop")
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={"default": SQLITE3_DATABASE_SETTINGS})
@patch("sys.stderr", new_callable=StringIO)
def test_should_print_stderr_for_sqlite3(self, m_stderr):
expected_error = (
"-- manage.py migrate will automatically create a sqlite3 database file.\n"
)
call_command("sqlcreate")
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(
DATABASES={
"unknown": {
"ENGINE": "django.db.backends.unknown",
"NAME": "database",
"USER": "foo",
}
}
)
@patch("sys.stderr", new_callable=StringIO)
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_stderr_and_standard_create_database_statement_for_unsupported_engine(
self, m_stdout, m_stderr
):
expected_error = "-- Don't know how to handle 'django.db.backends.unknown' falling back to SQL.\n"
expected_statement = """CREATE DATABASE database;
GRANT ALL PRIVILEGES ON DATABASE database to foo;
"""
call_command("sqlcreate", "--database=unknown")
self.assertEqual(expected_error, m_stderr.getvalue())
self.assertEqual(expected_statement, m_stdout.getvalue())
| SqlCreateTests |
python | scipy__scipy | scipy/_lib/_docscrape.py | {
"start": 19424,
"end": 23807
} | class ____(NumpyDocString):
extra_public_methods = ["__call__"]
def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
if not inspect.isclass(cls) and cls is not None:
raise ValueError(f"Expected a class or None, but got {cls!r}")
self._cls = cls
if "sphinx" in sys.modules:
from sphinx.ext.autodoc import ALL
else:
ALL = object()
if config is None:
config = {}
self.show_inherited_members = config.get("show_inherited_class_members", True)
if modulename and not modulename.endswith("."):
modulename += "."
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
_members = config.get("members", [])
if _members is ALL:
_members = None
_exclude = config.get("exclude-members", [])
if config.get("show_class_members", True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [
("Methods", self.methods),
("Attributes", self.properties),
]:
if not self[field]:
doc_list = []
for name in sorted(items):
if name in _exclude or (_members and name not in _members):
continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append(Parameter(name, "", splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
(not name.startswith("_") or name in self.extra_public_methods)
and isinstance(func, Callable)
and self._is_show_member(name)
)
]
@property
def properties(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
not name.startswith("_")
and not self._should_skip_member(name, self._cls)
and (
func is None
or isinstance(func, property | cached_property)
or inspect.isdatadescriptor(func)
)
and self._is_show_member(name)
)
]
@staticmethod
def _should_skip_member(name, klass):
return (
# Namedtuples should skip everything in their ._fields as the
# docstrings for each of the members is: "Alias for field number X"
issubclass(klass, tuple)
and hasattr(klass, "_asdict")
and hasattr(klass, "_fields")
and name in klass._fields
)
def _is_show_member(self, name):
return (
# show all class members
self.show_inherited_members
# or class member is not inherited
or name in self._cls.__dict__
)
def get_doc_object(
obj,
what=None,
doc=None,
config=None,
class_doc=ClassDoc,
func_doc=FunctionDoc,
obj_doc=ObjDoc,
):
if what is None:
if inspect.isclass(obj):
what = "class"
elif inspect.ismodule(obj):
what = "module"
elif isinstance(obj, Callable):
what = "function"
else:
what = "object"
if config is None:
config = {}
if what == "class":
return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
elif what in ("function", "method"):
return func_doc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return obj_doc(obj, doc, config=config) | ClassDoc |
python | pypa__warehouse | tests/unit/admin/views/test_prohibited_project_names.py | {
"start": 2671,
"end": 4288
} | class ____:
def test_no_project(self):
request = pretend.stub(GET={})
with pytest.raises(HTTPBadRequest):
views.confirm_prohibited_project_names(request)
def test_nothing_to_delete(self, db_request):
db_request.GET["project"] = "foo"
result = views.confirm_prohibited_project_names(db_request)
assert result == {
"prohibited_project_names": {"project": "foo", "comment": ""},
"existing": {
"project": None,
"releases": [],
"files": [],
"roles": [],
"releases_by_date": defaultdict(list),
},
}
def test_stuff_to_delete(self, db_request):
db_request.user = UserFactory.create()
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project)
file_ = FileFactory.create(release=release, filename="who cares")
role = RoleFactory.create(project=project, user=db_request.user)
db_request.GET["project"] = project.name
result = views.confirm_prohibited_project_names(db_request)
assert result == {
"prohibited_project_names": {"project": project.name, "comment": ""},
"existing": {
"project": project,
"releases": [release],
"files": [file_],
"roles": [role],
"releases_by_date": defaultdict(
list, {release.created.strftime("%Y-%m-%d"): [release]}
),
},
}
| TestConfirmProhibitedProjectName |
python | ray-project__ray | python/ray/_private/accelerators/npu.py | {
"start": 354,
"end": 2879
} | class ____(AcceleratorManager):
"""Ascend NPU accelerators."""
@staticmethod
def get_resource_name() -> str:
return "NPU"
@staticmethod
def get_visible_accelerator_ids_env_var() -> str:
return ASCEND_RT_VISIBLE_DEVICES_ENV_VAR
@staticmethod
def get_current_process_visible_accelerator_ids() -> Optional[List[str]]:
ascend_visible_devices = os.environ.get(
NPUAcceleratorManager.get_visible_accelerator_ids_env_var(), None
)
if ascend_visible_devices is None:
return None
if ascend_visible_devices == "":
return []
if ascend_visible_devices == "NoDevFiles":
return []
return list(ascend_visible_devices.split(","))
@staticmethod
def get_current_node_num_accelerators() -> int:
"""Attempt to detect the number of NPUs on this machine.
NPU chips are represented as devices within `/dev/`, either as `/dev/davinci?`.
Returns:
The number of NPUs if any were detected, otherwise 0.
"""
try:
import acl
device_count, ret = acl.rt.get_device_count()
if ret == 0:
return device_count
except Exception as e:
logger.debug("Could not import AscendCL: %s", e)
try:
npu_files = glob.glob("/dev/davinci[0-9]*")
return len(npu_files)
except Exception as e:
logger.debug("Failed to detect number of NPUs: %s", e)
return 0
@staticmethod
def get_current_node_accelerator_type() -> Optional[str]:
"""Get the type of the Ascend NPU on the current node.
Returns:
A string of the type, such as "Ascend910A", "Ascend910B", "Ascend310P1".
"""
try:
import acl
return acl.get_soc_name()
except Exception:
logger.exception("Failed to detect NPU type.")
return None
@staticmethod
def validate_resource_request_quantity(
quantity: float,
) -> Tuple[bool, Optional[str]]:
return (True, None)
@staticmethod
def set_current_process_visible_accelerator_ids(
visible_npu_devices: List[str],
) -> None:
if os.environ.get(NOSET_ASCEND_RT_VISIBLE_DEVICES_ENV_VAR):
return
os.environ[
NPUAcceleratorManager.get_visible_accelerator_ids_env_var()
] = ",".join([str(i) for i in visible_npu_devices])
| NPUAcceleratorManager |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 27539,
"end": 28067
} | class ____(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
return Application([("/", PingHandler)])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect("/", ping_interval=0.01, ping_timeout=0)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
ws.close()
| ClientPeriodicPingTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.