language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
mkdocs__mkdocs
|
mkdocs/tests/config/config_options_legacy_tests.py
|
{
"start": 38766,
"end": 39007
}
|
class ____(TestCase):
def test_defined(self):
class Schema:
option = c.Private()
with self.expect_error(option="For internal use only."):
self.get_config(Schema, {'option': 'somevalue'})
|
PrivateTest
|
python
|
allegroai__clearml
|
clearml/automation/optimization.py
|
{
"start": 46087,
"end": 49447
}
|
class ____(SearchStrategy):
"""
Grid search strategy controller. Full grid sampling of every hyperparameter combination.
"""
def __init__(
self,
base_task_id: str,
hyper_parameters: Sequence[Parameter],
objective_metric: Objective,
execution_queue: str,
num_concurrent_workers: int,
pool_period_min: float = 2.0,
time_limit_per_job: Optional[float] = None,
compute_time_limit: Optional[float] = None,
max_iteration_per_job: Optional[int] = None,
total_max_jobs: Optional[int] = None,
**_: Any
) -> ():
"""
Initialize a grid search optimizer
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When the time limit is
exceeded job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job, When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(GridSearch, self).__init__(
base_task_id=base_task_id,
hyper_parameters=hyper_parameters,
objective_metric=objective_metric,
execution_queue=execution_queue,
num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min,
time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit,
max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs,
**_
)
self._param_iterator = None
def create_job(self) -> Optional[ClearmlJob]:
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created ClearmlJob object, or None if no ClearmlJob is created.
"""
try:
parameters = self._next_configuration()
except StopIteration:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
def _next_configuration(self) -> Mapping[str, str]:
def param_iterator_fn() -> Generator[Dict[str, Any], None, None]:
hyper_params_values = [p.to_list() for p in self._hyper_parameters]
for state in product(*hyper_params_values):
yield dict(kv for d in state for kv in d.items())
if not self._param_iterator:
self._param_iterator = param_iterator_fn()
return next(self._param_iterator)
|
GridSearch
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/models/outbox.py
|
{
"start": 1774,
"end": 16531
}
|
class ____(Model):
sharding_columns: Iterable[str]
coalesced_columns: Iterable[str]
def should_skip_shard(self) -> bool:
if self.shard_scope == OutboxScope.ORGANIZATION_SCOPE:
return self.shard_identifier in options.get(
"hybrid_cloud.authentication.disabled_organization_shards"
)
if self.shard_scope == OutboxScope.USER_SCOPE:
return self.shard_identifier in options.get(
"hybrid_cloud.authentication.disabled_user_shards"
)
return False
@classmethod
def from_outbox_name(cls, name: str) -> type[Self]:
from django.apps import apps
app_name, model_name = name.split(".")
outbox_model = apps.get_model(app_name, model_name)
assert issubclass(outbox_model, cls)
return outbox_model
@classmethod
def next_object_identifier(cls) -> int:
using = router.db_for_write(cls)
with transaction.atomic(using=using):
with connections[using].cursor() as cursor:
cursor.execute("SELECT nextval(%s)", [f"{cls._meta.db_table}_id_seq"])
return cursor.fetchone()[0]
@classmethod
def find_scheduled_shards(cls, low: int = 0, hi: int | None = None) -> list[Mapping[str, Any]]:
q = cls.objects.values(*cls.sharding_columns).filter(
scheduled_for__lte=timezone.now(), id__gte=low
)
if hi is not None:
q = q.filter(id__lt=hi)
return list(
{k: row[k] for k in cls.sharding_columns}
for row in q.annotate(
scheduled_for=Min("scheduled_for"),
max_id=Max("id"),
).order_by("scheduled_for", "max_id")
)
@classmethod
def prepare_next_from_shard(cls, row: Mapping[str, Any]) -> Self | None:
using = router.db_for_write(cls)
try:
with transaction.atomic(using=using, savepoint=False):
next_outbox: OutboxBase | None
next_outbox = (
cls(**row)
.selected_messages_in_shard()
.order_by("id")
.select_for_update(nowait=True)
.first()
)
if not next_outbox:
return None
# We rely on 'proof of failure by remaining' to handle retries -- basically, by scheduling this shard, we
# expect all objects to be drained before the next schedule comes around, or else we will run again.
# Note that the system does not strongly protect against concurrent processing -- this is expected in the
# case of drains, for instance.
now = timezone.now()
next_outbox.selected_messages_in_shard().update(
scheduled_for=next_outbox.next_schedule(now), scheduled_from=now
)
return next_outbox
except OperationalError as e:
# If concurrent locking is happening on the table, gracefully pass and allow
# that work to process.
if isinstance(e.__cause__, psycopg2.errors.LockNotAvailable):
return None
else:
raise
def key_from(self, attrs: Iterable[str]) -> Mapping[str, Any]:
return {k: _ensure_not_null(k, getattr(self, k)) for k in attrs}
def selected_messages_in_shard(
self, latest_shard_row: OutboxBase | None = None
) -> models.QuerySet[Self]:
filters: Mapping[str, Any] = (
{} if latest_shard_row is None else dict(id__lte=latest_shard_row.id)
)
return self.objects.filter(**self.key_from(self.sharding_columns), **filters)
def select_coalesced_messages(self) -> models.QuerySet[Self]:
return self.objects.filter(**self.key_from(self.coalesced_columns))
class Meta:
abstract = True
__relocation_scope__ = RelocationScope.Excluded
# Different shard_scope, shard_identifier pairings of messages are always deliverable in parallel
shard_scope = BoundedPositiveIntegerField(choices=OutboxScope.as_choices(), null=False)
shard_identifier = BoundedBigIntegerField(null=False)
# Objects of equal scope, shard_identifier, category, and object_identifier are coalesced in processing.
category = BoundedPositiveIntegerField(choices=OutboxCategory.as_choices(), null=False)
object_identifier = BoundedBigIntegerField(null=False)
# payload is used for webhook payloads.
payload = models.JSONField(null=True)
# The point at which this object was scheduled, used as a diff from scheduled_for to determine the intended delay.
scheduled_from = models.DateTimeField(null=False, default=timezone.now)
# The point at which this object is intended to be replicated, used for backoff purposes. Keep in mind that
# the largest back off effectively applies to the entire 'shard' key.
scheduled_for = models.DateTimeField(null=False, default=THE_PAST)
# Initial creation date for the outbox which should not be modified. Used for lag time calculation.
date_added = models.DateTimeField(
null=False, default=timezone.now, db_default=Now(), editable=False
)
def last_delay(self) -> datetime.timedelta:
return max(self.scheduled_for - self.scheduled_from, datetime.timedelta(seconds=1))
def next_schedule(self, now: datetime.datetime) -> datetime.datetime:
return now + min((self.last_delay() * 2), datetime.timedelta(hours=1))
def save(self, *args: Any, **kwargs: Any) -> None:
if not OutboxScope.scope_has_category(self.shard_scope, self.category):
raise InvalidOutboxError(
f"Outbox.category {self.category} ({OutboxCategory(self.category).name}) not configured for scope {self.shard_scope} ({OutboxScope(self.shard_scope).name})"
)
if _outbox_context.flushing_enabled:
transaction.on_commit(lambda: self.drain_shard(), using=router.db_for_write(type(self)))
tags = {"category": OutboxCategory(self.category).name}
metrics.incr("outbox.saved", 1, tags=tags)
super().save(*args, **kwargs)
@contextlib.contextmanager
def process_shard(self, latest_shard_row: OutboxBase | None) -> Generator[OutboxBase | None]:
flush_all: bool = not bool(latest_shard_row)
next_shard_row: OutboxBase | None
using: str = db.router.db_for_write(type(self))
with transaction.atomic(using=using), django_test_transaction_water_mark(using=using):
try:
next_shard_row = (
self.selected_messages_in_shard(latest_shard_row=latest_shard_row)
.select_for_update(nowait=flush_all)
.first()
)
except OperationalError as e:
if isinstance(e.__cause__, psycopg2.errors.LockNotAvailable):
# If a non task flush process is running already, allow it to proceed without contention.
next_shard_row = None
else:
raise
yield next_shard_row
@contextlib.contextmanager
def process_coalesced(
self,
is_synchronous_flush: bool,
) -> Generator[OutboxBase | None]:
coalesced: OutboxBase | None = self.select_coalesced_messages().last()
first_coalesced: OutboxBase | None = self.select_coalesced_messages().first() or coalesced
tags: dict[str, int | str] = {"category": "None", "synchronous": int(is_synchronous_flush)}
if coalesced is not None:
tags["category"] = OutboxCategory(self.category).name
assert first_coalesced, "first_coalesced incorrectly set for non-empty coalesce group"
metrics.timing(
"outbox.coalesced_net_queue_time",
datetime.datetime.now(tz=datetime.UTC).timestamp()
- first_coalesced.date_added.timestamp(),
tags=tags,
)
yield coalesced
# If the context block didn't raise we mark messages as completed by deleting them.
if coalesced is not None:
assert first_coalesced, "first_coalesced incorrectly set for non-empty coalesce group"
deleted_count = 0
# Use a fetch and delete loop as doing cleanup in a single query
# causes timeouts with large datasets. Fetch in batches of 50 and
# Apply the ID condition in python as filtering rows in postgres
# leads to timeouts.
while True:
batch = self.select_coalesced_messages().values_list("id", flat=True)[:50]
delete_ids = [item_id for item_id in batch if item_id < coalesced.id]
if not len(delete_ids):
break
self.objects.filter(id__in=delete_ids).delete()
deleted_count += len(delete_ids)
# Only process the highest id after the others have been batch processed.
# It's not guaranteed that the ordering of the batch processing is in order,
# meaning that failures during deletion could leave an old, staler outbox
# alive.
if not self.should_skip_shard():
deleted_count += 1
coalesced.delete()
metrics.incr("outbox.processed", deleted_count, tags=tags)
metrics.timing(
"outbox.processing_lag",
datetime.datetime.now(tz=datetime.UTC).timestamp()
- first_coalesced.scheduled_from.timestamp(),
tags=tags,
)
metrics.timing(
"outbox.coalesced_net_processing_time",
datetime.datetime.now(tz=datetime.UTC).timestamp()
- first_coalesced.date_added.timestamp(),
tags=tags,
)
def _set_span_data_for_coalesced_message(self, span: Span, message: OutboxBase) -> None:
tag_for_outbox = OutboxScope.get_tag_name(message.shard_scope)
span.set_tag(tag_for_outbox, message.shard_identifier)
span.set_data("outbox_id", message.id)
span.set_data("outbox_shard_id", message.shard_identifier)
span.set_tag("outbox_category", OutboxCategory(message.category).name)
span.set_tag("outbox_scope", OutboxScope(message.shard_scope).name)
def process(self, is_synchronous_flush: bool) -> bool:
with self.process_coalesced(is_synchronous_flush=is_synchronous_flush) as coalesced:
if coalesced is not None and not self.should_skip_shard():
with (
metrics.timer(
"outbox.send_signal.duration",
tags={
"category": OutboxCategory(coalesced.category).name,
"synchronous": int(is_synchronous_flush),
},
),
sentry_sdk.start_span(op="outbox.process") as span,
):
self._set_span_data_for_coalesced_message(span=span, message=coalesced)
try:
coalesced.send_signal()
except Exception as e:
raise OutboxFlushError(
f"Could not flush shard category={coalesced.category} ({OutboxCategory(coalesced.category).name})",
coalesced,
) from e
return True
return False
@abc.abstractmethod
def send_signal(self) -> None:
pass
def drain_shard(
self, flush_all: bool = False, _test_processing_barrier: threading.Barrier | None = None
) -> None:
in_test_assert_no_transaction(
"drain_shard should only be called outside of any active transaction!"
)
try:
# When we are flushing in a local context, we don't care about outboxes created concurrently --
# at best our logic depends on previously created outboxes.
latest_shard_row: OutboxBase | None = None
if not flush_all:
latest_shard_row = self.selected_messages_in_shard().last()
# If we're not flushing all possible shards, and we don't see any immediate values,
# drop.
if latest_shard_row is None:
return
shard_row: OutboxBase | None
while True:
with self.process_shard(latest_shard_row) as shard_row:
if shard_row is None:
break
if _test_processing_barrier:
_test_processing_barrier.wait()
processed = shard_row.process(is_synchronous_flush=not flush_all)
if _test_processing_barrier:
_test_processing_barrier.wait()
if not processed:
break
except DatabaseError as e:
raise OutboxDatabaseError(
f"Failed to process Outbox, {OutboxCategory(self.category).name} due to database error",
) from e
@classmethod
def get_shard_depths_descending(cls, limit: int | None = 10) -> list[dict[str, int | str]]:
"""
Queries all outbox shards for their total depth, aggregated by their
sharding columns as specified by the outbox class implementation.
:param limit: Limits the query to the top N rows with the greatest shard
depth. If limit is None, the entire set of rows will be returned.
:return: A list of dictionaries, containing shard depths and shard
relevant column values.
"""
if limit is not None:
assert limit > 0, "Limit must be a positive integer if specified"
base_depth_query = (
cls.objects.values(*cls.sharding_columns).annotate(depth=Count("*")).order_by("-depth")
)
if limit is not None:
base_depth_query = base_depth_query[0:limit]
aggregated_shard_information = list()
for shard_row in base_depth_query:
shard_information = {
shard_column: shard_row[shard_column] for shard_column in cls.sharding_columns
}
shard_information["depth"] = shard_row["depth"]
aggregated_shard_information.append(shard_information)
return aggregated_shard_information
@classmethod
def get_total_outbox_count(cls) -> int:
return cls.objects.count()
# Outboxes bound from region silo -> control silo
|
OutboxBase
|
python
|
marshmallow-code__marshmallow
|
tests/test_deserialization.py
|
{
"start": 60254,
"end": 80619
}
|
class ____:
def test_deserialize_to_dict(self):
user_dict = {"name": "Monty", "age": "42.3"}
result = SimpleUserSchema().load(user_dict)
assert result["name"] == "Monty"
assert math.isclose(result["age"], 42.3)
def test_deserialize_with_missing_values(self):
user_dict = {"name": "Monty"}
result = SimpleUserSchema().load(user_dict)
# 'age' is not included in result
assert result == {"name": "Monty"}
def test_deserialize_many(self):
users_data = [{"name": "Mick", "age": "914"}, {"name": "Keith", "age": "8442"}]
result = SimpleUserSchema(many=True).load(users_data)
assert isinstance(result, list)
user = result[0]
assert user["age"] == int(users_data[0]["age"])
def test_exclude(self):
schema = SimpleUserSchema(exclude=("age",), unknown=EXCLUDE)
result = schema.load({"name": "Monty", "age": 42})
assert "name" in result
assert "age" not in result
def test_nested_single_deserialization_to_dict(self):
class SimpleBlogSerializer(Schema):
title = fields.String()
author = fields.Nested(SimpleUserSchema, unknown=EXCLUDE)
blog_dict = {
"title": "Gimme Shelter",
"author": {"name": "Mick", "age": "914", "email": "mick@stones.com"},
}
result = SimpleBlogSerializer().load(blog_dict)
author = result["author"]
assert author["name"] == "Mick"
assert author["age"] == 914
assert "email" not in author
def test_nested_list_deserialization_to_dict(self):
class SimpleBlogSerializer(Schema):
title = fields.String()
authors = fields.Nested(SimpleUserSchema, many=True)
blog_dict = {
"title": "Gimme Shelter",
"authors": [
{"name": "Mick", "age": "914"},
{"name": "Keith", "age": "8442"},
],
}
result = SimpleBlogSerializer().load(blog_dict)
assert isinstance(result["authors"], list)
author = result["authors"][0]
assert author["name"] == "Mick"
assert author["age"] == 914
def test_nested_single_none_not_allowed(self):
class PetSchema(Schema):
name = fields.Str()
class OwnerSchema(Schema):
pet = fields.Nested(PetSchema(), allow_none=False)
sch = OwnerSchema()
errors = sch.validate({"pet": None})
assert "pet" in errors
assert errors["pet"] == ["Field may not be null."]
def test_nested_many_non_not_allowed(self):
class PetSchema(Schema):
name = fields.Str()
class StoreSchema(Schema):
pets = fields.Nested(PetSchema, allow_none=False, many=True)
sch = StoreSchema()
errors = sch.validate({"pets": None})
assert "pets" in errors
assert errors["pets"] == ["Field may not be null."]
def test_nested_single_required_missing(self):
class PetSchema(Schema):
name = fields.Str()
class OwnerSchema(Schema):
pet = fields.Nested(PetSchema(), required=True)
sch = OwnerSchema()
errors = sch.validate({})
assert "pet" in errors
assert errors["pet"] == ["Missing data for required field."]
def test_nested_many_required_missing(self):
class PetSchema(Schema):
name = fields.Str()
class StoreSchema(Schema):
pets = fields.Nested(PetSchema, required=True, many=True)
sch = StoreSchema()
errors = sch.validate({})
assert "pets" in errors
assert errors["pets"] == ["Missing data for required field."]
def test_nested_only_basestring(self):
class ANestedSchema(Schema):
pk = fields.Str()
class MainSchema(Schema):
pk = fields.Str()
child = fields.Pluck(ANestedSchema, "pk")
sch = MainSchema()
result = sch.load({"pk": "123", "child": "456"})
assert result["child"]["pk"] == "456"
def test_nested_only_basestring_with_list_data(self):
class ANestedSchema(Schema):
pk = fields.Str()
class MainSchema(Schema):
pk = fields.Str()
children = fields.Pluck(ANestedSchema, "pk", many=True)
sch = MainSchema()
result = sch.load({"pk": "123", "children": ["456", "789"]})
assert result["children"][0]["pk"] == "456"
assert result["children"][1]["pk"] == "789"
def test_nested_none_deserialization(self):
class SimpleBlogSerializer(Schema):
title = fields.String()
author = fields.Nested(SimpleUserSchema, allow_none=True)
blog_dict = {"title": "Gimme Shelter", "author": None}
result = SimpleBlogSerializer().load(blog_dict)
assert result["author"] is None
assert result["title"] == blog_dict["title"]
def test_deserialize_with_attribute_param(self):
class AliasingUserSerializer(Schema):
username = fields.Email(attribute="email")
years = fields.Integer(attribute="age")
data = {"username": "foo@bar.com", "years": "42"}
result = AliasingUserSerializer().load(data)
assert result["email"] == "foo@bar.com"
assert result["age"] == 42
# regression test for https://github.com/marshmallow-code/marshmallow/issues/450
def test_deserialize_with_attribute_param_symmetry(self):
class MySchema(Schema):
foo = fields.Raw(attribute="bar.baz")
schema = MySchema()
dump_data = schema.dump({"bar": {"baz": 42}})
assert dump_data == {"foo": 42}
load_data = schema.load({"foo": 42})
assert load_data == {"bar": {"baz": 42}}
def test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name(
self,
):
class AliasingUserSerializer(Schema):
username = fields.Email(attribute="email")
years = fields.Integer(attribute="age")
data = {"username": "foobar.com", "years": "42"}
with pytest.raises(ValidationError) as excinfo:
AliasingUserSerializer().load(data)
errors = excinfo.value.messages
assert errors["username"] == ["Not a valid email address."]
def test_deserialize_with_attribute_param_error_returns_data_key_not_attribute_name(
self,
):
class AliasingUserSerializer(Schema):
name = fields.String(data_key="Name")
username = fields.Email(attribute="email", data_key="UserName")
years = fields.Integer(attribute="age", data_key="Years")
data = {"Name": "Mick", "UserName": "foobar.com", "Years": "abc"}
with pytest.raises(ValidationError) as excinfo:
AliasingUserSerializer().load(data)
errors = excinfo.value.messages
assert errors["UserName"] == ["Not a valid email address."]
assert errors["Years"] == ["Not a valid integer."]
def test_deserialize_with_data_key_param(self):
class AliasingUserSerializer(Schema):
name = fields.String(data_key="Name")
username = fields.Email(attribute="email", data_key="UserName")
years = fields.Integer(data_key="Years")
data = {"Name": "Mick", "UserName": "foo@bar.com", "years": "42"}
result = AliasingUserSerializer(unknown=EXCLUDE).load(data)
assert result["name"] == "Mick"
assert result["email"] == "foo@bar.com"
assert "years" not in result
def test_deserialize_with_data_key_as_empty_string(self):
class MySchema(Schema):
name = fields.Raw(data_key="")
schema = MySchema()
assert schema.load({"": "Grace"}) == {"name": "Grace"}
def test_deserialize_with_dump_only_param(self):
class AliasingUserSerializer(Schema):
name = fields.String()
years = fields.Integer(dump_only=True)
size = fields.Integer(dump_only=True, load_only=True)
nicknames = fields.List(fields.Str(), dump_only=True)
data = {
"name": "Mick",
"years": "42",
"size": "12",
"nicknames": ["Your Majesty", "Brenda"],
}
result = AliasingUserSerializer(unknown=EXCLUDE).load(data)
assert result["name"] == "Mick"
assert "years" not in result
assert "size" not in result
assert "nicknames" not in result
def test_deserialize_with_missing_param_value(self):
bdate = dt.datetime(2017, 9, 29)
class AliasingUserSerializer(Schema):
name = fields.String()
birthdate = fields.DateTime(load_default=bdate)
data = {"name": "Mick"}
result = AliasingUserSerializer().load(data)
assert result["name"] == "Mick"
assert result["birthdate"] == bdate
def test_deserialize_with_missing_param_callable(self):
bdate = dt.datetime(2017, 9, 29)
class AliasingUserSerializer(Schema):
name = fields.String()
birthdate = fields.DateTime(load_default=lambda: bdate)
data = {"name": "Mick"}
result = AliasingUserSerializer().load(data)
assert result["name"] == "Mick"
assert result["birthdate"] == bdate
def test_deserialize_with_missing_param_none(self):
class AliasingUserSerializer(Schema):
name = fields.String()
years = fields.Integer(load_default=None, allow_none=True)
data = {"name": "Mick"}
result = AliasingUserSerializer().load(data)
assert result["name"] == "Mick"
assert result["years"] is None
def test_deserialization_raises_with_errors(self):
bad_data = {"email": "invalid-email", "colors": "burger", "age": -1}
v = Validator()
with pytest.raises(ValidationError) as excinfo:
v.load(bad_data)
errors = excinfo.value.messages
assert "email" in errors
assert "colors" in errors
assert "age" in errors
def test_deserialization_raises_with_errors_with_multiple_validators(self):
bad_data = {"email": "invalid-email", "colors": "burger", "age": -1}
v = Validators()
with pytest.raises(ValidationError) as excinfo:
v.load(bad_data)
errors = excinfo.value.messages
assert "email" in errors
assert "colors" in errors
assert "age" in errors
def test_deserialization_many_raises_errors(self):
bad_data = [
{"email": "foo@bar.com", "colors": "red", "age": 18},
{"email": "bad", "colors": "pizza", "age": -1},
]
v = Validator(many=True)
with pytest.raises(ValidationError):
v.load(bad_data)
def test_validation_errors_are_stored(self):
def validate_field(val):
raise ValidationError("Something went wrong")
class MySchema(Schema):
foo = fields.Raw(validate=validate_field)
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"foo": 42})
errors = excinfo.value.messages
assert "Something went wrong" in errors["foo"]
def test_multiple_errors_can_be_stored_for_a_field(self):
def validate1(n):
raise ValidationError("error one")
def validate2(n):
raise ValidationError("error two")
class MySchema(Schema):
foo = fields.Raw(required=True, validate=[validate1, validate2])
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"foo": "bar"})
errors = excinfo.value.messages
assert type(errors["foo"]) is list
assert len(errors["foo"]) == 2
def test_multiple_errors_can_be_stored_for_an_email_field(self):
def validate(val):
raise ValidationError("Invalid value.")
class MySchema(Schema):
email = fields.Email(validate=[validate])
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"email": "foo"})
errors = excinfo.value.messages
assert len(errors["email"]) == 2
assert "Not a valid email address." in errors["email"][0]
def test_multiple_errors_can_be_stored_for_a_url_field(self):
def validator(val):
raise ValidationError("Not a valid URL.")
class MySchema(Schema):
url = fields.Url(validate=[validator])
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"url": "foo"})
errors = excinfo.value.messages
assert len(errors["url"]) == 2
assert "Not a valid URL." in errors["url"][0]
def test_required_value_only_passed_to_validators_if_provided(self):
class MySchema(Schema):
foo = fields.Raw(required=True, validate=lambda f: False)
with pytest.raises(ValidationError) as excinfo:
MySchema().load({})
errors = excinfo.value.messages
# required value missing
assert len(errors["foo"]) == 1
assert "Missing data for required field." in errors["foo"]
@pytest.mark.parametrize("partial_schema", [True, False])
def test_partial_deserialization(self, partial_schema):
class MySchema(Schema):
foo = fields.Raw(required=True)
bar = fields.Raw(required=True)
data = {"foo": 3}
if partial_schema:
result = MySchema(partial=True).load(data)
else:
result = MySchema().load(data, partial=True)
assert result["foo"] == 3
assert "bar" not in result
def test_partial_fields_deserialization(self):
class MySchema(Schema):
foo = fields.Raw(required=True)
bar = fields.Raw(required=True)
baz = fields.Raw(required=True)
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"foo": 3}, partial=tuple())
data, errors = excinfo.value.valid_data, excinfo.value.messages
assert data["foo"] == 3
assert "bar" in errors
assert "baz" in errors
data = MySchema().load({"foo": 3}, partial=("bar", "baz"))
assert isinstance(data, dict)
assert data["foo"] == 3
assert "bar" not in data
assert "baz" not in data
data = MySchema(partial=True).load({"foo": 3}, partial=("bar", "baz"))
assert isinstance(data, dict)
assert data["foo"] == 3
assert "bar" not in data
assert "baz" not in data
def test_partial_fields_validation(self):
class MySchema(Schema):
foo = fields.Raw(required=True)
bar = fields.Raw(required=True)
baz = fields.Raw(required=True)
errors = MySchema().validate({"foo": 3}, partial=tuple())
assert "bar" in errors
assert "baz" in errors
errors = MySchema().validate({"foo": 3}, partial=("bar", "baz"))
assert errors == {}
errors = MySchema(partial=True).validate({"foo": 3}, partial=("bar", "baz"))
assert errors == {}
def test_unknown_fields_deserialization(self):
class MySchema(Schema):
foo = fields.Integer()
data = MySchema(unknown=EXCLUDE).load({"foo": 3, "bar": 5})
assert data["foo"] == 3
assert "bar" not in data
data = MySchema(unknown=INCLUDE).load({"foo": 3, "bar": 5}, unknown=EXCLUDE)
assert data["foo"] == 3
assert "bar" not in data
data = MySchema(unknown=EXCLUDE).load({"foo": 3, "bar": 5}, unknown=INCLUDE)
assert data["foo"] == 3
assert data["bar"]
data = MySchema(unknown=INCLUDE).load({"foo": 3, "bar": 5})
assert data["foo"] == 3
assert data["bar"]
with pytest.raises(ValidationError, match="foo"):
MySchema(unknown=INCLUDE).load({"foo": "asd", "bar": 5})
data = MySchema(unknown=INCLUDE, many=True).load(
[{"foo": 1}, {"foo": 3, "bar": 5}]
)
assert "foo" in data[1]
assert "bar" in data[1]
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"foo": 3, "bar": 5})
err = excinfo.value
assert "bar" in err.messages
assert err.messages["bar"] == ["Unknown field."]
with pytest.raises(ValidationError) as excinfo:
MySchema(many=True).load([{"foo": "abc"}, {"foo": 3, "bar": 5}])
err = excinfo.value
assert 0 in err.messages
assert "foo" in err.messages[0]
assert err.messages[0]["foo"] == ["Not a valid integer."]
assert 1 in err.messages
assert "bar" in err.messages[1]
assert err.messages[1]["bar"] == ["Unknown field."]
def test_unknown_fields_deserialization_precedence(self):
class MySchema(Schema):
class Meta:
unknown = INCLUDE
foo = fields.Integer()
data = MySchema().load({"foo": 3, "bar": 5})
assert data["foo"] == 3
assert data["bar"] == 5
data = MySchema(unknown=EXCLUDE).load({"foo": 3, "bar": 5})
assert data["foo"] == 3
assert "bar" not in data
data = MySchema().load({"foo": 3, "bar": 5}, unknown=EXCLUDE)
assert data["foo"] == 3
assert "bar" not in data
with pytest.raises(ValidationError):
MySchema(unknown=EXCLUDE).load({"foo": 3, "bar": 5}, unknown=RAISE)
def test_unknown_fields_deserialization_with_data_key(self):
class MySchema(Schema):
foo = fields.Integer(data_key="Foo")
data = MySchema().load({"Foo": 1})
assert data["foo"] == 1
assert "Foo" not in data
data = MySchema(unknown=RAISE).load({"Foo": 1})
assert data["foo"] == 1
assert "Foo" not in data
with pytest.raises(ValidationError):
MySchema(unknown=RAISE).load({"foo": 1})
data = MySchema(unknown=INCLUDE).load({"Foo": 1})
assert data["foo"] == 1
assert "Foo" not in data
def test_unknown_fields_deserialization_with_index_errors_false(self):
class MySchema(Schema):
foo = fields.Integer()
class Meta:
unknown = RAISE
index_errors = False
with pytest.raises(ValidationError) as excinfo:
MySchema(many=True).load([{"foo": "invalid"}, {"foo": 42, "bar": 24}])
err = excinfo.value
assert 1 not in err.messages
assert "foo" in err.messages
assert "bar" in err.messages
assert err.messages["foo"] == ["Not a valid integer."]
assert err.messages["bar"] == ["Unknown field."]
def test_dump_only_fields_considered_unknown(self):
class MySchema(Schema):
foo = fields.Int(dump_only=True)
with pytest.raises(ValidationError) as excinfo:
MySchema().load({"foo": 42})
err = excinfo.value
assert "foo" in err.messages
assert err.messages["foo"] == ["Unknown field."]
# When unknown = INCLUDE, dump-only fields are included as unknown
# without any validation.
data = MySchema(unknown=INCLUDE).load({"foo": "LOL"})
assert data["foo"] == "LOL"
def test_unknown_fields_do_not_unpack_dotted_names(self):
class MySchema(Schema):
class Meta:
unknown = INCLUDE
foo = fields.Str()
bar = fields.Str(data_key="bar.baz")
# dotted names are still supported
data = MySchema().load({"foo": "hi", "bar.baz": "okay"})
assert data == {"foo": "hi", "bar": "okay"}
# but extra keys included via unknown=INCLUDE are not transformed into nested dicts
data = MySchema().load({"foo": "hi", "bar.baz": "okay", "alpha.beta": "woah!"})
assert data == {"foo": "hi", "bar": "okay", "alpha.beta": "woah!"}
validators_gen = (
func for func in [predicate(lambda x: x <= 24), predicate(lambda x: x >= 18)]
)
validators_gen_float = (
func for func in [predicate(lambda f: f <= 4.1), predicate(lambda f: f >= 1.0)]
)
validators_gen_str = (
func
for func in [
predicate(lambda n: len(n) == 3),
predicate(lambda n: n[1].lower() == "o"),
]
)
|
TestSchemaDeserialization
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
|
{
"start": 26032,
"end": 26898
}
|
class ____(nn.Module):
def __init__(self, config: Phi4MultimodalAudioConfig):
super().__init__()
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.act_fn = ACT2FN[config.activation]
self.gate_up_proj = nn.Linear(config.hidden_size, config.intermediate_size * 2)
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
up_states = self.gate_up_proj(hidden_states)
up_states, gate = up_states.chunk(2, dim=-1)
up_states = up_states * self.act_fn(gate)
up_states = self.dropout(up_states)
hidden_states = self.down_proj(up_states)
out = self.dropout(hidden_states)
return out
|
Phi4MultimodalAudioMLP
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/citation.py
|
{
"start": 726,
"end": 4156
}
|
class ____(Domain):
"""Domain for citations."""
name = 'citation'
label = 'citation'
dangling_warnings = {
'ref': 'citation not found: %(target)s',
}
@property
def citations(self) -> dict[str, tuple[str, str, int]]:
return self.data.setdefault('citations', {})
@property
def citation_refs(self) -> dict[str, set[str]]:
return self.data.setdefault('citation_refs', {})
def clear_doc(self, docname: str) -> None:
for key, (fn, _l, _lineno) in list(self.citations.items()):
if fn == docname:
del self.citations[key]
for key, docnames in list(self.citation_refs.items()):
if docnames == {docname}:
del self.citation_refs[key]
elif docname in docnames:
docnames.remove(docname)
def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
# XXX duplicates?
for key, data in otherdata['citations'].items():
if data[0] in docnames:
self.citations[key] = data
for key, data in otherdata['citation_refs'].items():
citation_refs = self.citation_refs.setdefault(key, set())
for docname in data:
if docname in docnames:
citation_refs.add(docname)
def note_citation(self, node: nodes.citation) -> None:
label = node[0].astext()
if label in self.citations:
path = self.env.doc2path(self.citations[label][0])
logger.warning(
__('duplicate citation %s, other instance in %s'),
label,
path,
location=node,
type='ref',
subtype='citation',
)
self.citations[label] = (node['docname'], node['ids'][0], node.line) # type: ignore[assignment]
def note_citation_reference(self, node: pending_xref) -> None:
docnames = self.citation_refs.setdefault(node['reftarget'], set())
docnames.add(self.env.current_document.docname)
def check_consistency(self) -> None:
for name, (docname, _labelid, lineno) in self.citations.items():
if name not in self.citation_refs:
logger.warning(
__('Citation [%s] is not referenced.'),
name,
type='ref',
subtype='citation',
location=(docname, lineno),
)
def resolve_xref(
self,
env: BuildEnvironment,
fromdocname: str,
builder: Builder,
typ: str,
target: str,
node: pending_xref,
contnode: Element,
) -> nodes.reference | None:
docname, labelid, _lineno = self.citations.get(target, ('', '', 0))
if not docname:
return None
return make_refnode(builder, fromdocname, docname, labelid, contnode)
def resolve_any_xref(
self,
env: BuildEnvironment,
fromdocname: str,
builder: Builder,
target: str,
node: pending_xref,
contnode: Element,
) -> list[tuple[str, nodes.reference]]:
refnode = self.resolve_xref(
env, fromdocname, builder, 'ref', target, node, contnode
)
if refnode is None:
return []
else:
return [('ref', refnode)]
|
CitationDomain
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/random/random_poisson_test.py
|
{
"start": 1411,
"end": 7089
}
|
class ____(test.TestCase):
"""This is a large test due to the moments computation taking some time."""
def _Sampler(self, num, lam, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_poisson(lam, [num], dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in range(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testMoments(self):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s", e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
for dt in _SUPPORTED_DTYPES:
# Test when lam < 10 and when lam >= 10
for stride in 0, 4, 10:
for lam in (3., 20):
max_moment = 5
sampler = self._Sampler(10000, lam, dt, use_gpu=False, seed=12345)
z_scores = util.test_moment_matching(
sampler(),
max_moment,
stats.poisson(lam),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in _SUPPORTED_DTYPES:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
@test_util.run_deprecated_v1
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
with self.cached_session():
rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
diff = rnd2 - rnd1
# Since these are all positive integers, the norm will
# be at least 1 if they are different.
self.assertGreaterEqual(np.linalg.norm(diff.eval()), 1)
def testZeroShape(self):
with self.cached_session():
rnd = random_ops.random_poisson([], [], seed=12345)
self.assertEqual([0], rnd.get_shape().as_list())
self.assertAllClose(np.array([], dtype=np.float32), self.evaluate(rnd))
@test_util.run_deprecated_v1
def testShape(self):
# Fully known shape
rnd = random_ops.random_poisson(2.0, [150], seed=12345)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([1, 2, 3]),
shape=[150],
seed=12345)
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([1, 2, 3]),
shape=[20, 30],
seed=12345)
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32, shape=(2,)),
shape=[12],
seed=12345)
self.assertEqual([12, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_poisson(
lam=array_ops.ones([7, 3]),
shape=array_ops.placeholder(dtypes.int32, shape=(1,)),
seed=12345)
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_poisson(
lam=array_ops.ones([9, 6]),
shape=array_ops.placeholder(dtypes.int32, shape=(3,)),
seed=12345)
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32),
shape=array_ops.placeholder(dtypes.int32),
seed=12345)
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_poisson(
lam=array_ops.placeholder(dtypes.float32),
shape=[50],
seed=12345)
self.assertIs(None, rnd.get_shape().ndims)
@test_util.run_deprecated_v1
def testDTypeCombinationsV2(self):
"""Tests random_poisson_v2() for all supported dtype combinations."""
with self.cached_session():
for lam_dt in _SUPPORTED_DTYPES:
for out_dt in _SUPPORTED_DTYPES:
random_ops.random_poisson(
constant_op.constant([1], dtype=lam_dt), [10],
dtype=out_dt).eval()
@test_util.run_deprecated_v1
def testInfRate(self):
sample = random_ops.random_poisson(shape=[2], lam=np.inf)
self.assertAllEqual([np.inf, np.inf], self.evaluate(sample))
def testSizeTooLarge(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"overflow"):
rate = constant_op.constant(1.0, shape=(4, 4, 4, 4, 4))
self.evaluate(
random_ops.random_poisson(
shape=[46902, 51188, 34063, 59195], lam=rate))
if __name__ == "__main__":
test.main()
|
RandomPoissonTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/nested_structure_coder.py
|
{
"start": 3687,
"end": 4778
}
|
class ____:
"""Codec for lists."""
def can_encode(self, pyobj):
return isinstance(pyobj, list)
def do_encode(self, list_value, encode_fn):
encoded_list = struct_pb2.StructuredValue()
encoded_list.list_value.CopyFrom(struct_pb2.ListValue())
for element in list_value:
encoded_list.list_value.values.add().CopyFrom(encode_fn(element))
return encoded_list
def can_decode(self, value):
return value.HasField("list_value")
def do_decode(self, value, decode_fn):
return [decode_fn(element) for element in value.list_value.values]
def _is_tuple(obj):
return not _is_named_tuple(obj) and isinstance(obj, tuple)
def _is_named_tuple(instance):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
Returns:
True if `instance` is a `namedtuple`.
"""
if not isinstance(instance, tuple):
return False
return (hasattr(instance, "_fields") and
isinstance(instance._fields, collections_abc.Sequence) and
all(isinstance(f, str) for f in instance._fields))
|
_ListCodec
|
python
|
django__django
|
django/db/models/functions/text.py
|
{
"start": 5769,
"end": 6122
}
|
class ____(Transform):
"""Return the number of characters in the expression."""
function = "LENGTH"
lookup_name = "length"
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection, function="CHAR_LENGTH", **extra_context
)
|
Length
|
python
|
getsentry__sentry
|
fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_not_null_app/migrations/0003_delete.py
|
{
"start": 190,
"end": 525
}
|
class ____(CheckedMigration):
dependencies = [
("good_flow_delete_field_pending_with_not_null_app", "0002_remove_not_null_and_pending"),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="field",
deletion_action=DeletionAction.DELETE,
),
]
|
Migration
|
python
|
great-expectations__great_expectations
|
great_expectations/core/util.py
|
{
"start": 5907,
"end": 8478
}
|
class ____:
"""
Parses an Azure Blob Storage URL into its separate components.
Formats:
WASBS (for Spark): "wasbs://<CONTAINER>@<ACCOUNT_NAME>.blob.core.windows.net/<BLOB>"
HTTP(S) (for Pandas) "<ACCOUNT_NAME>.blob.core.windows.net/<CONTAINER>/<BLOB>"
Reference: WASBS -- Windows Azure Storage Blob (https://datacadamia.com/azure/wasb).
"""
AZURE_BLOB_STORAGE_PROTOCOL_DETECTION_REGEX_PATTERN: str = (
r"^[^@]+@.+\.blob\.core\.windows\.net\/.+$"
)
AZURE_BLOB_STORAGE_HTTPS_URL_REGEX_PATTERN: str = (
r"^(https?:\/\/)?(.+?)\.blob\.core\.windows\.net/([^/]+)/(.+)$"
)
AZURE_BLOB_STORAGE_HTTPS_URL_TEMPLATE: str = (
"{account_name}.blob.core.windows.net/{container}/{path}"
)
AZURE_BLOB_STORAGE_WASBS_URL_REGEX_PATTERN: str = (
r"^(wasbs?:\/\/)?([^/]+)@(.+?)\.blob\.core\.windows\.net/(.+)$"
)
AZURE_BLOB_STORAGE_WASBS_URL_TEMPLATE: str = (
"wasbs://{container}@{account_name}.blob.core.windows.net/{path}"
)
def __init__(self, url: str) -> None:
search = re.search(AzureUrl.AZURE_BLOB_STORAGE_PROTOCOL_DETECTION_REGEX_PATTERN, url)
if search is None:
search = re.search(AzureUrl.AZURE_BLOB_STORAGE_HTTPS_URL_REGEX_PATTERN, url)
assert search is not None, (
"The provided URL does not adhere to the format specified by the "
"Azure SDK (<ACCOUNT_NAME>.blob.core.windows.net/<CONTAINER>/<BLOB>)"
)
self._protocol = search.group(1)
self._account_name = search.group(2)
self._container = search.group(3)
self._blob = search.group(4)
else:
search = re.search(AzureUrl.AZURE_BLOB_STORAGE_WASBS_URL_REGEX_PATTERN, url)
assert search is not None, (
"The provided URL does not adhere to the format specified by the Azure SDK (wasbs://<CONTAINER>@<ACCOUNT_NAME>.blob.core.windows.net/<BLOB>)"
)
self._protocol = search.group(1)
self._container = search.group(2)
self._account_name = search.group(3)
self._blob = search.group(4)
@property
def protocol(self):
return self._protocol
@property
def account_name(self):
return self._account_name
@property
def account_url(self):
return f"{self.account_name}.blob.core.windows.net"
@property
def container(self):
return self._container
@property
def blob(self):
return self._blob
|
AzureUrl
|
python
|
spack__spack
|
lib/spack/spack/variant.py
|
{
"start": 952,
"end": 1343
}
|
class ____(enum.IntEnum):
"""Enum representing the three concrete variant types."""
BOOL = 1
SINGLE = 2
MULTI = 3
@property
def string(self) -> str:
"""Convert the variant type to a string."""
if self == VariantType.BOOL:
return "bool"
elif self == VariantType.SINGLE:
return "single"
return "multi"
|
VariantType
|
python
|
oauthlib__oauthlib
|
oauthlib/oauth2/rfc6749/request_validator.py
|
{
"start": 144,
"end": 28848
}
|
class ____:
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: https://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: https://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
"""
return True
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
Headers may be accesses through request.headers and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
The authentication process is required to contain the identification of
the client (i.e. search the database based on the client_id). In case the
client doesn't exist based on the received client_id, this method has to
return False and the HTTP response created by the library will contain
'invalid_client' message.
After the client identification succeeds, this method needs to set the
client on the request, i.e. request.client = client. A client object's
class must contain the 'client_id' attribute and the 'client_id' must have
a value.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: https://tools.ietf.org/html/rfc1945#section-11.1
"""
raise NotImplementedError('Subclasses must implement this method.')
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, request,
*args, **kwargs):
"""Ensure that the authorization process represented by this authorization
code began with this 'redirect_uri'.
If the client specifies a redirect_uri when obtaining code then that
redirect URI must be bound to the code and verified equal in this
method, according to RFC 6749 section 4.1.3. Do not compare against
the client's allowed redirect URIs, but against the URI used when the
code was saved.
:param client_id: Unicode client identifier.
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client.
:param refresh_token: Unicode refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def introspect_token(self, token, token_type_hint, request, *args, **kwargs):
"""Introspect an access or refresh token.
Called once the introspect request is validated. This method should
verify the *token* and either return a dictionary with the list of
claims associated, or `None` in case the token is unknown.
Below the list of registered claims you should be interested in:
- scope : space-separated list of scopes
- client_id : client identifier
- username : human-readable identifier for the resource owner
- token_type : type of the token
- exp : integer timestamp indicating when this token will expire
- iat : integer timestamp indicating when this token was issued
- nbf : integer timestamp indicating when it can be "not-before" used
- sub : subject of the token - identifier of the resource owner
- aud : list of string identifiers representing the intended audience
- iss : string representing issuer of this token
- jti : string identifier for the token
Note that most of them are coming directly from JWT RFC. More details
can be found in `Introspect Claims`_ or `JWT Claims`_.
The implementation can use *token_type_hint* to improve lookup
efficiency, but must fallback to other types to be compliant with RFC.
The dict of claims is added to request.token after this method.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Introspect Endpoint (all grants are compatible)
.. _`Introspect Claims`: https://tools.ietf.org/html/rfc7662#section-2.2
.. _`JWT Claims`: https://tools.ietf.org/html/rfc7519#section-4
"""
raise NotImplementedError('Subclasses must implement this method.')
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier.
:param code: The authorization code grant (request.code).
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
def rotate_refresh_token(self, request):
"""Determine whether to rotate the refresh token. Default, yes.
When access tokens are refreshed the old refresh token can be kept
or replaced with a new one (rotated). Return True to rotate and
and False for keeping original.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh Token Grant
"""
return True
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (``client_id``)
- the redirect URI used (``request.redirect_uri``)
- a resource owner / user (``request.user``)
- the authorized scopes (``request.scopes``)
To support PKCE, you MUST associate the code with:
- Code Challenge (``request.code_challenge``) and
- Code Challenge Method (``request.code_challenge_method``)
To support OIDC, you MUST associate the code with:
- nonce, if present (``code["nonce"]``)
The ``code`` argument is actually a dictionary, containing at least a
``code`` key with the actual authorization code:
``{'code': 'sdf345jsdf0934f'}``
It may also have a ``claims`` parameter which, when present, will be a dict
deserialized from JSON as described at
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
This value should be saved in this method and used again in ``.validate_code``.
:param client_id: Unicode client identifier.
:param code: A dict of the authorization code grant and, optionally, state.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def save_token(self, token, request, *args, **kwargs):
"""Persist the token with a token type specific method.
Currently, only save_bearer_token is supported.
:param token: A (Bearer) token dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
return self.save_bearer_token(token, request, *args, **kwargs)
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
- a client and it's client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
- a claims document, if present in request.claims
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client (implicit ONLY)
}
Note that while "scope" is a string-separated list of authorized scopes,
the original list is still available in request.scopes.
The token dict is passed as a reference so any changes made to the dictionary
will go back to the user. If additional information must return to the client
user, and it is only possible to get this information after writing the token
to storage, it should be added to the token dictionary. If the token
dictionary must be modified but the changes should not go back to the user,
a copy of the dictionary must be made before making the changes.
Also note that if an Authorization Code grant request included a valid claims
parameter (for OpenID Connect) then the request.claims property will contain
the claims dict, which should be saved for later use when generating the
id_token and/or UserInfo response content.
:param token: A Bearer token dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
A key to OAuth 2 security and restricting impact of leaked tokens is
the short expiration time of tokens, *always ensure the token has not
expired!*.
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new.
Allows for fine grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a restful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for django these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Verify that the authorization_code is valid and assigned to the given
client.
Before returning true, set the following based on the information stored
with the code in 'save_authorization_code':
- request.user
- request.scopes
- request.claims (if given)
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes
must also be set.
The request.claims property, if it was given, should assigned a dict.
If PKCE is enabled (see 'is_pkce_required' and 'save_authorization_code')
you MUST set the following based on the information stored:
- request.code_challenge
- request.code_challenge_method
:param client_id: Unicode client identifier.
:param code: Unicode authorization code.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier.
:param grant_type: Unicode grant type, i.e. authorization_code, password.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier.
:param redirect_uri: Unicode absolute URI.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier.
:param response_type: Unicode response type, i.e. code, token.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier.
:param scopes: List of scopes (defined by you).
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
persistence method used (commonly, save_bearer_token).
:param username: Unicode username.
:param password: Unicode password.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_pkce_required(self, client_id, request):
"""Determine if current request requires PKCE. Default, False.
This is called for both "authorization" and "token" requests.
Override this method by ``return True`` to enable PKCE for everyone.
You might want to enable it only for public clients.
Note that PKCE can also be used in addition of a client authentication.
OAuth 2.0 public clients utilizing the Authorization Code Grant are
susceptible to the authorization code interception attack. This
specification describes the attack as well as a technique to mitigate
against the threat through the use of Proof Key for Code Exchange
(PKCE, pronounced "pixy"). See `RFC7636`_.
:param client_id: Client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
.. _`RFC7636`: https://tools.ietf.org/html/rfc7636
"""
return False
def get_code_challenge(self, code, request):
"""Is called for every "token" requests.
When the server issues the authorization code in the authorization
response, it MUST associate the ``code_challenge`` and
``code_challenge_method`` values with the authorization code so it can
be verified later.
Typically, the ``code_challenge`` and ``code_challenge_method`` values
are stored in encrypted form in the ``code`` itself but could
alternatively be stored on the server associated with the code. The
server MUST NOT include the ``code_challenge`` value in client requests
in a form that other entities can extract.
Return the ``code_challenge`` associated to the code.
If ``None`` is returned, code is considered to not be associated to any
challenges.
:param code: Authorization code.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: code_challenge string
Method is used by:
- Authorization Code Grant - when PKCE is active
"""
return None
def get_code_challenge_method(self, code, request):
"""Is called during the "token" request processing, when a
``code_verifier`` and a ``code_challenge`` has been provided.
See ``.get_code_challenge``.
Must return ``plain`` or ``S256``. You can return a custom value if you have
implemented your own ``AuthorizationCodeGrant`` class.
:param code: Authorization code.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: code_challenge_method string
Method is used by:
- Authorization Code Grant - when PKCE is active
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_origin_allowed(self, client_id, origin, request, *args, **kwargs):
"""Indicate if the given origin is allowed to access the token endpoint
via Cross-Origin Resource Sharing (CORS). CORS is used by browser-based
clients, such as Single-Page Applications, to perform the Authorization
Code Grant.
(Note: If performing Authorization Code Grant via a public client such
as a browser, you should use PKCE as well.)
If this method returns true, the appropriate CORS headers will be added
to the response. By default this method always returns False, meaning
CORS is disabled.
:param client_id: Unicode client identifier.
:param redirect_uri: Unicode origin.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: bool
Method is used by:
- Authorization Code Grant
- Refresh Token Grant
"""
return False
|
RequestValidator
|
python
|
fluentpython__example-code
|
13-op-overloading/vector_v7.py
|
{
"start": 6246,
"end": 9539
}
|
class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
def __hash__(self):
hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __neg__(self):
return Vector(-x for x in self)
def __pos__(self):
return Vector(self)
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'): # hyperspherical coordinates
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)],
self.angles())
outer_fmt = '<{}>'
else:
coords = self
outer_fmt = '({})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, scalar):
if isinstance(scalar, numbers.Real):
return Vector(n * scalar for n in self)
else:
return NotImplemented
def __rmul__(self, scalar):
return self * scalar
|
Vector
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-kyriba/source_kyriba/source.py
|
{
"start": 7434,
"end": 8539
}
|
class ____(AccountSubStream):
def stream_slices(
self, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None, **kwargs
) -> Iterable[Optional[Mapping[str, Any]]]:
slices = []
account_uuids = self.get_account_uuids()
# bank balances require the date to be specified
bal_date = self.start_date
end_date = self.end_date or date.today()
while bal_date <= end_date:
slices.extend([{**u, "date": bal_date.isoformat()} for u in account_uuids])
bal_date = bal_date + timedelta(days=1)
return slices
def path(self, stream_slice: Mapping[str, Any], **kwargs) -> str:
account_uuid = stream_slice["account_uuid"]
return f"bank-balances/accounts/{account_uuid}/balances"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {
"date": stream_slice["date"],
"type": self.balance_type,
}
|
BankBalancesStream
|
python
|
ethereum__web3.py
|
web3/_utils/module_testing/go_ethereum_admin_module.py
|
{
"start": 1958,
"end": 3435
}
|
class ____:
@pytest.mark.asyncio
async def test_async_datadir(self, async_w3: "AsyncWeb3[Any]") -> None:
datadir = await async_w3.geth.admin.datadir()
assert isinstance(datadir, str)
@pytest.mark.asyncio
async def test_async_node_info(self, async_w3: "AsyncWeb3[Any]") -> None:
node_info = await async_w3.geth.admin.node_info()
assert "Geth" in node_info["name"]
@pytest.mark.asyncio
async def test_async_nodes(self, async_w3: "AsyncWeb3[Any]") -> None:
nodes = await async_w3.geth.admin.peers()
assert isinstance(nodes, list)
@pytest.mark.asyncio
async def test_admin_peers(self, async_w3: "AsyncWeb3[Any]") -> None:
node_info = await async_w3.geth.admin.node_info()
await async_w3.geth.admin.add_peer(node_info["enode"])
result = await async_w3.geth.admin.peers()
assert len(result) == 1
@pytest.mark.asyncio
async def test_admin_start_stop_http(self, async_w3: "AsyncWeb3[Any]") -> None:
stop = await async_w3.geth.admin.stop_http()
assert stop is True
start = await async_w3.geth.admin.start_http()
assert start is True
@pytest.mark.asyncio
async def test_admin_start_stop_ws(self, async_w3: "AsyncWeb3[Any]") -> None:
stop = await async_w3.geth.admin.stop_ws()
assert stop is True
start = await async_w3.geth.admin.start_ws()
assert start is True
|
GoEthereumAsyncAdminModuleTest
|
python
|
pytorch__pytorch
|
torch/onnx/_internal/fx/passes/type_promotion.py
|
{
"start": 42274,
"end": 45895
}
|
class ____:
"""Hackly distilling info from reference ops decorated with elementwise type promotion rule.
The goal is to retrieve the decorator
```python
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=type_promotion_kind,
)
```
from the reference ops. It provides info as for which arguments are promoted
and what kind of promotion is applied.
"""
@classmethod
def generate_from_torch_refs(cls) -> set[ElementwiseTypePromotionRule]:
"""Parse type promotion rules from reference ops under torch._C._refs."""
rule_set = set()
rule_set.update(cls._parse_torch_refs(_refs))
rule_set.update(cls._parse_torch_refs(_nn_refs))
rule_set.update(cls._parse_torch_refs(_linalg_refs))
rule_set.update(cls._parse_torch_refs(_special_refs))
rule_set.update(cls._parse_torch_refs(_functional_refs))
return rule_set
@classmethod
def _parse_torch_refs(
cls, ref_module: ModuleType
) -> set[ElementwiseTypePromotionRule]:
logger.info("Processing module: %s", ref_module.__name__)
rule_set = set()
for name in ref_module.__all__:
decorated_op = getattr(ref_module, name)
rule = cls._parse_type_promotion_rule_from_refs_op(decorated_op)
if rule is not None and rule.is_valid():
rule_set.add(rule)
return rule_set
@classmethod
def _parse_type_promotion_rule_from_refs_op(
cls,
decorated_op: Callable,
) -> ElementwiseTypePromotionRule | None:
"""Retrieve and parse type promotion decorator from op under torch._refs."""
fn = decorated_op
type_promo_wrapper = None
while fn_closure_vars := _try_getclosurevars(fn):
if "fn" not in fn_closure_vars.nonlocals:
break
if "self" in fn_closure_vars.nonlocals and isinstance(
fn_closure_vars.nonlocals["self"],
_prims_common_wrappers.elementwise_type_promotion_wrapper,
):
type_promo_wrapper = fn_closure_vars.nonlocals["self"]
break
fn = fn_closure_vars.nonlocals["fn"]
if type_promo_wrapper is not None:
signature = inspect.signature(decorated_op)
pos = 0
promote_args_positions = []
promote_kwargs_names = []
if type_promo_wrapper.type_promoting_arg_names is not None:
for name, param in signature.parameters.items():
if name in type_promo_wrapper.type_promoting_arg_names:
if param.kind in (
param.POSITIONAL_OR_KEYWORD,
param.POSITIONAL_ONLY,
):
promote_args_positions.append(pos)
elif param.kind == param.KEYWORD_ONLY:
promote_kwargs_names.append(name)
pos += 1
return ElementwiseTypePromotionRule(
"aten",
decorated_op.__name__,
promote_args_positions=promote_args_positions,
promote_kwargs_names=promote_kwargs_names,
promotion_kind=type_promo_wrapper.type_promotion_kind,
)
logger.warning(
"Cannot find type promotion rule for: %s.%s",
decorated_op.__module__,
decorated_op.__name__,
)
return None
|
ElementwiseTypePromotionRuleSetGenerator
|
python
|
walkccc__LeetCode
|
solutions/2548. Maximum Price to Fill a Bag/2548.py
|
{
"start": 0,
"end": 370
}
|
class ____:
def maxPrice(self, items: list[list[int]], capacity: int) -> float:
ans = 0
# Sort items based on price//weight.
for price, weight in sorted(items, key=lambda x: -x[0] / x[1]):
# The bag is filled.
if capacity <= weight:
return ans + price * capacity / weight
ans += price
capacity -= weight
return -1
|
Solution
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/javascript.py
|
{
"start": 17034,
"end": 21444
}
|
class ____(RegexLexer):
"""
For `TypeScript <http://typescriptlang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'TypeScript'
aliases = ['ts', 'typescript']
filenames = ['*.ts', '*.tsx']
mimetypes = ['text/x-typescript']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
# Match stuff like: module name {...}
(r'\b(module)(\s*)(\s*[\w?.$][\w?.$]*)(\s*)',
bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'),
# Match variable type keywords
(r'\b(string|bool|number)\b', Keyword.Type),
# Match stuff like: constructor
(r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved),
# Match stuff like: super(argument, list)
(r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
# Match stuff like: function() {...}
(r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
# Match stuff like: (function: return type)
(r'([\w?.$][\w?.$]*)(\s*:\s*)([\w?.$][\w?.$]*)',
bygroups(Name.Other, Text, Keyword.Type)),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'`', String.Backtick, 'interp'),
# Match stuff like: Decorators
(r'@\w+', Keyword.Declaration),
],
# The 'interp*' rules match those in JavascriptLexer. Changes made
# there should be reflected here as well.
'interp': [
(r'`', String.Backtick, '#pop'),
(r'\\\\', String.Backtick),
(r'\\`', String.Backtick),
(r'\$\{', String.Interpol, 'interp-inside'),
(r'\$', String.Backtick),
(r'[^`\\$]+', String.Backtick),
],
'interp-inside': [
# TODO: should this include single-line comments and allow nesting strings?
(r'\}', String.Interpol, '#pop'),
include('root'),
],
}
def analyse_text(text):
if re.search('^(import.+(from\s+)?["\']|'
'(export\s*)?(interface|class|function)\s+)',
text, re.MULTILINE):
return 1.0
|
TypeScriptLexer
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/util_common.py
|
{
"start": 4126,
"end": 17814
}
|
class ____:
"""Configuration common to all commands."""
def __init__(self, args: t.Any, command: str) -> None:
self.command = command
self.interactive = False
self.check_layout = True
self.success: t.Optional[bool] = None
self.color: bool = args.color
self.explain: bool = args.explain
self.verbosity: int = args.verbosity
self.debug: bool = args.debug
self.truncate: int = args.truncate
self.redact: bool = args.redact
self.display_traceback: str = args.display_traceback
self.display_stderr: bool = False
self.session_name = generate_name()
self.cache: dict[str, t.Any] = {}
def get_ansible_config(self) -> str:
"""Return the path to the Ansible config for the given config."""
return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
def get_docs_url(url: str) -> str:
"""
Return the given docs.ansible.com URL updated to match the running ansible-test version, if it is not a pre-release version.
The URL should be in the form: https://docs.ansible.com/ansible/devel/path/to/doc.html
Where 'devel' will be replaced with the current version, unless it is a pre-release version.
When run under a pre-release version, the URL will remain unchanged.
This serves to provide a fallback URL for pre-release versions.
It also makes searching the source for docs links easier, since a full URL is provided to this function.
"""
url_prefix = 'https://docs.ansible.com/ansible-core/devel/'
if not url.startswith(url_prefix):
raise ValueError(f'URL "{url}" does not start with: {url_prefix}')
ansible_version = get_ansible_version()
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
new_prefix = f'https://docs.ansible.com/ansible-core/{url_version}/'
url = url.replace(url_prefix, new_prefix)
return url
def create_result_directories(args: CommonConfig) -> None:
"""Create result directories."""
if args.explain:
return
make_dirs(ResultType.COVERAGE.path)
make_dirs(ResultType.DATA.path)
def handle_layout_messages(messages: t.Optional[LayoutMessages]) -> None:
"""Display the given layout messages."""
if not messages:
return
for message in messages.info:
display.info(message, verbosity=1)
for message in messages.warning:
display.warning(message)
if messages.error:
raise ApplicationError('\n'.join(messages.error))
def process_scoped_temporary_file(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
"""Return the path to a temporary file that will be automatically removed when the process exits."""
if args.explain:
path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
else:
temp_fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(temp_fd)
ExitHandler.register(lambda: os.remove(path))
return path
def process_scoped_temporary_directory(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
"""Return the path to a temporary directory that will be automatically removed when the process exits."""
if args.explain:
path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
else:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
ExitHandler.register(lambda: remove_tree(path))
return path
@contextlib.contextmanager
def named_temporary_file(args: CommonConfig, prefix: str, suffix: str, directory: t.Optional[str], content: str) -> c.Iterator[str]:
"""Context manager for a named temporary file."""
if args.explain:
yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix))
else:
with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
tempfile_fd.write(to_bytes(content))
tempfile_fd.flush()
try:
yield tempfile_fd.name
finally:
pass
def write_json_test_results(
category: ResultType,
name: str,
content: t.Union[list[t.Any], dict[str, t.Any]],
formatted: bool = True,
encoder: t.Optional[t.Type[json.JSONEncoder]] = None,
) -> None:
"""Write the given json content to the specified test results path, creating directories as needed."""
path = os.path.join(category.path, name)
write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
def write_text_test_results(category: ResultType, name: str, content: str) -> None:
"""Write the given text content to the specified test results path, creating directories as needed."""
path = os.path.join(category.path, name)
write_text_file(path, content, create_directories=True)
@cache
def get_injector_path() -> str:
"""Return the path to a directory which contains a `python.py` executable and associated injector scripts."""
injector_path = tempfile.mkdtemp(prefix='ansible-test-', suffix='-injector', dir='/tmp')
display.info(f'Initializing "{injector_path}" as the temporary injector directory.', verbosity=1)
injector_names = sorted(list(ANSIBLE_BIN_SYMLINK_MAP) + [
'importer.py',
'pytest',
'ansible_connection_cli_stub.py',
])
scripts = (
('python.py', '/usr/bin/env python', MODE_FILE_EXECUTE),
('virtualenv.sh', '/usr/bin/env bash', MODE_FILE),
)
source_path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'injector')
for name in injector_names:
os.symlink('python.py', os.path.join(injector_path, name))
for name, shebang, mode in scripts:
src = os.path.join(source_path, name)
dst = os.path.join(injector_path, name)
script = read_text_file(src)
script = set_shebang(script, shebang)
write_text_file(dst, script)
verified_chmod(dst, mode)
verified_chmod(injector_path, MODE_DIRECTORY)
def cleanup_injector() -> None:
"""Remove the temporary injector directory."""
remove_tree(injector_path)
ExitHandler.register(cleanup_injector)
return injector_path
def set_shebang(script: str, executable: str) -> str:
"""Return the given script with the specified executable used for the shebang."""
prefix = '#!'
shebang = prefix + executable
overwrite = (
prefix,
'# auto-shebang',
'# shellcheck shell=',
)
lines = script.splitlines()
if any(lines[0].startswith(value) for value in overwrite):
lines[0] = shebang
else:
lines.insert(0, shebang)
script = '\n'.join(lines)
return script
def get_python_path(interpreter: str) -> str:
"""Return the path to a directory which contains a `python` executable that runs the specified interpreter."""
python_path = PYTHON_PATHS.get(interpreter)
if python_path:
return python_path
prefix = 'python-'
suffix = '-ansible'
root_temp_dir = '/tmp'
python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
injected_interpreter = os.path.join(python_path, 'python')
# A symlink is faster than the execv wrapper, but isn't guaranteed to provide the correct result.
# There are several scenarios known not to work with symlinks:
#
# - A virtual environment where the target is a symlink to another directory.
# - A pyenv environment where the target is a shell script that changes behavior based on the program name.
#
# To avoid issues for these and other scenarios, only an exec wrapper is used.
display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
create_interpreter_wrapper(interpreter, injected_interpreter)
verified_chmod(python_path, MODE_DIRECTORY)
if not PYTHON_PATHS:
ExitHandler.register(cleanup_python_paths)
PYTHON_PATHS[interpreter] = python_path
return python_path
def create_temp_dir(prefix: t.Optional[str] = None, suffix: t.Optional[str] = None, base_dir: t.Optional[str] = None) -> str:
"""Create a temporary directory that persists until the current process exits."""
temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
ExitHandler.register(remove_tree, temp_path)
return temp_path
def create_interpreter_wrapper(interpreter: str, injected_interpreter: str) -> None:
"""Create a wrapper for the given Python interpreter at the specified path."""
# sys.executable is used for the shebang to guarantee it is a binary instead of a script
# injected_interpreter could be a script from the system or our own wrapper created for the --venv option
shebang_interpreter = sys.executable
code = textwrap.dedent("""
#!%s
from __future__ import annotations
from os import execv
from sys import argv
python = '%s'
execv(python, [python] + argv[1:])
""" % (shebang_interpreter, interpreter)).lstrip()
write_text_file(injected_interpreter, code)
verified_chmod(injected_interpreter, MODE_FILE_EXECUTE)
def cleanup_python_paths() -> None:
"""Clean up all temporary python directories."""
for path in sorted(PYTHON_PATHS.values()):
display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
remove_tree(path)
def intercept_python(
args: CommonConfig,
python: PythonConfig,
cmd: list[str],
env: dict[str, str],
capture: bool,
data: t.Optional[str] = None,
cwd: t.Optional[str] = None,
always: bool = False,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""
Run a command while intercepting invocations of Python to control the version used.
If the specified Python is an ansible-test managed virtual environment, it will be added to PATH to activate it.
Otherwise, a temporary directory will be created to ensure the correct Python can be found in PATH.
"""
cmd = list(cmd)
env = get_injector_env(python, env)
return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd, always=always)
def get_injector_env(
python: PythonConfig,
env: dict[str, str],
) -> dict[str, str]:
"""Get the environment variables needed to inject the given Python interpreter into the environment."""
env = env.copy()
inject_path = get_injector_path()
# make sure scripts (including injector.py) find the correct Python interpreter
if isinstance(python, VirtualPythonConfig):
python_path = os.path.dirname(python.path)
else:
python_path = get_python_path(python.path)
env['PATH'] = os.path.pathsep.join([inject_path, python_path, env['PATH']])
env['ANSIBLE_TEST_PYTHON_VERSION'] = python.version
env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = python.path
return env
def run_command(
args: CommonConfig,
cmd: c.Iterable[str],
capture: bool,
env: t.Optional[dict[str, str]] = None,
data: t.Optional[str] = None,
cwd: t.Optional[str] = None,
always: bool = False,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
interactive: bool = False,
output_stream: t.Optional[OutputStream] = None,
cmd_verbosity: int = 1,
str_errors: str = 'strict',
error_callback: t.Optional[c.Callable[[SubprocessError], None]] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return stdout and stderr as a tuple."""
explain = args.explain and not always
return raw_command(
cmd,
capture=capture,
env=env,
data=data,
cwd=cwd,
explain=explain,
stdin=stdin,
stdout=stdout,
interactive=interactive,
output_stream=output_stream,
cmd_verbosity=cmd_verbosity,
str_errors=str_errors,
error_callback=error_callback,
)
def yamlcheck(python: PythonConfig, explain: bool = False) -> t.Optional[bool]:
"""Return True if PyYAML has libyaml support, False if it does not and None if it was not found."""
stdout = raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True, explain=explain)[0]
if explain:
return None
result = json.loads(stdout)
if not result['yaml']:
return None
return result['cloader']
def check_pyyaml(python: PythonConfig, required: bool = True, quiet: bool = False) -> t.Optional[bool]:
"""
Return True if PyYAML has libyaml support, False if it does not and None if it was not found.
The result is cached if True or required.
"""
try:
return CHECK_YAML_VERSIONS[python.path]
except KeyError:
pass
state = yamlcheck(python)
if state is not None or required:
# results are cached only if pyyaml is required or present
# it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
CHECK_YAML_VERSIONS[python.path] = state
if not quiet:
if state is None:
if required:
display.warning('PyYAML is not installed for interpreter: %s' % python.path)
elif not state:
display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python.path)
return state
|
CommonConfig
|
python
|
huggingface__transformers
|
examples/pytorch/object-detection/run_object_detection.py
|
{
"start": 2065,
"end": 8970
}
|
class ____:
logits: torch.Tensor
pred_boxes: torch.Tensor
def format_image_annotations_as_coco(
image_id: str, categories: list[int], areas: list[float], bboxes: list[tuple[float]]
) -> dict:
"""Format one set of image annotations to the COCO format
Args:
image_id (str): image id. e.g. "0001"
categories (list[int]): list of categories/class labels corresponding to provided bounding boxes
areas (list[float]): list of corresponding areas to provided bounding boxes
bboxes (list[tuple[float]]): list of bounding boxes provided in COCO format
([center_x, center_y, width, height] in absolute coordinates)
Returns:
dict: {
"image_id": image id,
"annotations": list of formatted annotations
}
"""
annotations = []
for category, area, bbox in zip(categories, areas, bboxes):
formatted_annotation = {
"image_id": image_id,
"category_id": category,
"iscrowd": 0,
"area": area,
"bbox": list(bbox),
}
annotations.append(formatted_annotation)
return {
"image_id": image_id,
"annotations": annotations,
}
def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:
"""
Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1]
to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates.
Args:
boxes (torch.Tensor): Bounding boxes in YOLO format
image_size (tuple[int, int]): Image size in format (height, width)
Returns:
torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)
"""
# convert center to corners format
boxes = center_to_corners_format(boxes)
# convert to absolute coordinates
height, width = image_size
boxes = boxes * torch.tensor([[width, height, width, height]])
return boxes
def augment_and_transform_batch(
examples: Mapping[str, Any],
transform: A.Compose,
image_processor: AutoImageProcessor,
return_pixel_mask: bool = False,
) -> BatchFeature:
"""Apply augmentations and format annotations in COCO format for object detection task"""
images = []
annotations = []
for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]):
image = np.array(image.convert("RGB"))
# apply augmentations
output = transform(image=image, bboxes=objects["bbox"], category=objects["category"])
images.append(output["image"])
# format annotations in COCO format
formatted_annotations = format_image_annotations_as_coco(
image_id, output["category"], objects["area"], output["bboxes"]
)
annotations.append(formatted_annotations)
# Apply the image processor transformations: resizing, rescaling, normalization
result = image_processor(images=images, annotations=annotations, return_tensors="pt")
if not return_pixel_mask:
result.pop("pixel_mask", None)
return result
def collate_fn(batch: list[BatchFeature]) -> Mapping[str, Union[torch.Tensor, list[Any]]]:
data = {}
data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch])
data["labels"] = [x["labels"] for x in batch]
if "pixel_mask" in batch[0]:
data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch])
return data
@torch.no_grad()
def compute_metrics(
    evaluation_results: EvalPrediction,
    image_processor: AutoImageProcessor,
    threshold: float = 0.0,
    id2label: Optional[Mapping[int, str]] = None,
) -> Mapping[str, float]:
    """
    Compute mean average mAP, mAR and their variants for the object detection task.

    Runs under ``torch.no_grad()`` since only metric values are needed.

    Args:
        evaluation_results (EvalPrediction): Predictions and targets from evaluation.
        image_processor (AutoImageProcessor): Processor whose
            ``post_process_object_detection`` converts raw model outputs to
            per-image boxes/scores/labels in Pascal VOC coordinates.
        threshold (float, optional): Threshold to filter predicted boxes by confidence. Defaults to 0.0
            (keep everything, so the metric sees all predictions).
        id2label (Optional[dict], optional): Mapping from class id to class name. Defaults to None.
    Returns:
        Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>}
    """
    predictions, targets = evaluation_results.predictions, evaluation_results.label_ids

    # For metric computation we need to provide:
    #  - targets in a form of list of dictionaries with keys "boxes", "labels"
    #  - predictions in a form of list of dictionaries with keys "boxes", "scores", "labels"

    image_sizes = []
    post_processed_targets = []
    post_processed_predictions = []

    # Collect targets in the required format for metric computation
    for batch in targets:
        # collect image sizes, we will need them for predictions post processing
        # NOTE(review): assumes every target dict carries "orig_size" as (height, width) — confirm against the dataset transform.
        batch_image_sizes = torch.tensor([x["orig_size"] for x in batch])
        image_sizes.append(batch_image_sizes)
        # collect targets in the required format for metric computation
        # boxes were converted to YOLO format needed for model training
        # here we will convert them to Pascal VOC format (x_min, y_min, x_max, y_max)
        for image_target in batch:
            boxes = torch.tensor(image_target["boxes"])
            boxes = convert_bbox_yolo_to_pascal(boxes, image_target["orig_size"])
            labels = torch.tensor(image_target["class_labels"])
            post_processed_targets.append({"boxes": boxes, "labels": labels})

    # Collect predictions in the required format for metric computation,
    # model produce boxes in YOLO format, then image_processor convert them to Pascal VOC format
    for batch, target_sizes in zip(predictions, image_sizes):
        # Index 0 is skipped; presumably it holds the loss — TODO confirm against the model's output ordering.
        batch_logits, batch_boxes = batch[1], batch[2]
        output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes))
        post_processed_output = image_processor.post_process_object_detection(
            output, threshold=threshold, target_sizes=target_sizes
        )
        post_processed_predictions.extend(post_processed_output)

    # Compute metrics
    # class_metrics=True adds per-class mAP/mAR lists, unpacked into flat keys below.
    metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True)
    metric.update(post_processed_predictions, post_processed_targets)
    metrics = metric.compute()

    # Replace list of per class metrics with separate metric for each class
    classes = metrics.pop("classes")
    map_per_class = metrics.pop("map_per_class")
    mar_100_per_class = metrics.pop("mar_100_per_class")
    for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class):
        # Fall back to the raw class id when no id2label mapping is supplied.
        class_name = id2label[class_id.item()] if id2label is not None else class_id.item()
        metrics[f"map_{class_name}"] = class_map
        metrics[f"mar_100_{class_name}"] = class_mar

    # Convert remaining 0-dim tensors to rounded Python floats for logging.
    metrics = {k: round(v.item(), 4) for k, v in metrics.items()}

    return metrics
@dataclass
|
ModelOutput
|
python
|
huggingface__transformers
|
src/transformers/generation/candidate_generator.py
|
{
"start": 1239,
"end": 3334
}
|
class ____:
"""Abstract base class for all candidate generators that can be applied during assisted generation."""
def get_candidates(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor, torch.FloatTensor | None]:
"""
Fetches the candidates to be tried for the current input.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
Return:
`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
vocabulary_size)` containing the logits associated to each candidate.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
)
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
"""
Updates the candidate generation strategy based on the outcomes.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
beam search or log softmax for each vocabulary token when using beam search
num_matches (`int`):
The number of matches between the candidate sequences and the model predictions.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
"`update_candidate_strategy`."
)
|
CandidateGenerator
|
python
|
conda__conda
|
conda/core/package_cache_data.py
|
{
"start": 21496,
"end": 23293
}
|
class ____:
# this is a class to manage urls.txt
# it should basically be thought of as a sequence
# in this class I'm breaking the rule that all disk access goes through conda.gateways
def __init__(self, pkgs_dir):
self.pkgs_dir = pkgs_dir
self.urls_txt_path = urls_txt_path = join(pkgs_dir, "urls.txt")
if isfile(urls_txt_path):
with open(urls_txt_path, "rb") as fh:
self._urls_data = [line.strip().decode("utf-8") for line in fh]
self._urls_data.reverse()
else:
self._urls_data = []
def __contains__(self, url):
return url in self._urls_data
def __iter__(self):
return iter(self._urls_data)
def add_url(self, url):
with codecs.open(self.urls_txt_path, mode="ab", encoding="utf-8") as fh:
linefeed = "\r\n" if platform == "win32" else "\n"
fh.write(url + linefeed)
self._urls_data.insert(0, url)
@memoizemethod
def get_url(self, package_path):
# package path can be a full path or just a basename
# can be either an extracted directory or tarball
package_path = basename(package_path)
# NOTE: This makes an assumption that all extensionless packages came from a .tar.bz2.
# That's probably a good assumption going forward, because we should now always
# be recording the extension in urls.txt. The extensionless situation should be
# legacy behavior only.
if not package_path.endswith(CONDA_PACKAGE_EXTENSIONS):
package_path += CONDA_PACKAGE_EXTENSION_V1
return first(self, lambda url: basename(url) == package_path)
# ##############################
# downloading
# ##############################
|
UrlsData
|
python
|
ray-project__ray
|
rllib/utils/exploration/epsilon_greedy.py
|
{
"start": 789,
"end": 9429
}
|
class ____(Exploration):
"""Epsilon-greedy Exploration class that produces exploration actions.
When given a Model's output and a current epsilon value (based on some
Schedule), it produces a random action (if rand(1) < eps) or
uses the model-computed one (if rand(1) >= eps).
"""
def __init__(
self,
action_space: gym.spaces.Space,
*,
framework: str,
initial_epsilon: float = 1.0,
final_epsilon: float = 0.05,
warmup_timesteps: int = 0,
epsilon_timesteps: int = int(1e5),
epsilon_schedule: Optional[Schedule] = None,
**kwargs,
):
"""Create an EpsilonGreedy exploration class.
Args:
action_space: The action space the exploration should occur in.
framework: The framework specifier.
initial_epsilon: The initial epsilon value to use.
final_epsilon: The final epsilon value to use.
warmup_timesteps: The timesteps over which to not change epsilon in the
beginning.
epsilon_timesteps: The timesteps (additional to `warmup_timesteps`)
after which epsilon should always be `final_epsilon`.
E.g.: warmup_timesteps=20k epsilon_timesteps=50k -> After 70k timesteps,
epsilon will reach its final value.
epsilon_schedule: An optional Schedule object
to use (instead of constructing one from the given parameters).
"""
assert framework is not None
super().__init__(action_space=action_space, framework=framework, **kwargs)
self.epsilon_schedule = from_config(
Schedule, epsilon_schedule, framework=framework
) or PiecewiseSchedule(
endpoints=[
(0, initial_epsilon),
(warmup_timesteps, initial_epsilon),
(warmup_timesteps + epsilon_timesteps, final_epsilon),
],
outside_value=final_epsilon,
framework=self.framework,
)
# The current timestep value (tf-var or python int).
self.last_timestep = get_variable(
np.array(0, np.int64),
framework=framework,
tf_name="timestep",
dtype=np.int64,
)
# Build the tf-info-op.
if self.framework == "tf":
self._tf_state_op = self.get_state()
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: Optional[Union[bool, TensorType]] = True,
):
if self.framework in ["tf2", "tf"]:
return self._get_tf_exploration_action_op(
action_distribution, explore, timestep
)
else:
return self._get_torch_exploration_action(
action_distribution, explore, timestep
)
def _get_tf_exploration_action_op(
self,
action_distribution: ActionDistribution,
explore: Union[bool, TensorType],
timestep: Union[int, TensorType],
) -> "tf.Tensor":
"""TF method to produce the tf op for an epsilon exploration action.
Args:
action_distribution: The instantiated ActionDistribution object
to work with when creating exploration actions.
Returns:
The tf exploration-action op.
"""
# TODO: Support MultiActionDistr for tf.
q_values = action_distribution.inputs
epsilon = self.epsilon_schedule(
timestep if timestep is not None else self.last_timestep
)
# Get the exploit action as the one with the highest logit value.
exploit_action = tf.argmax(q_values, axis=1)
batch_size = tf.shape(q_values)[0]
# Mask out actions with q-value=-inf so that we don't even consider
# them for exploration.
random_valid_action_logits = tf.where(
tf.equal(q_values, tf.float32.min),
tf.ones_like(q_values) * tf.float32.min,
tf.ones_like(q_values),
)
random_actions = tf.squeeze(
tf.random.categorical(random_valid_action_logits, 1), axis=1
)
chose_random = (
tf.random.uniform(
tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32
)
< epsilon
)
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool)
else explore,
true_fn=(lambda: tf.where(chose_random, random_actions, exploit_action)),
false_fn=lambda: exploit_action,
)
if self.framework == "tf2" and not self.policy_config["eager_tracing"]:
self.last_timestep = timestep
return action, tf.zeros_like(action, dtype=tf.float32)
else:
assign_op = tf1.assign(self.last_timestep, tf.cast(timestep, tf.int64))
with tf1.control_dependencies([assign_op]):
return action, tf.zeros_like(action, dtype=tf.float32)
def _get_torch_exploration_action(
self,
action_distribution: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType],
) -> "torch.Tensor":
"""Torch method to produce an epsilon exploration action.
Args:
action_distribution: The instantiated
ActionDistribution object to work with when creating
exploration actions.
Returns:
The exploration-action.
"""
q_values = action_distribution.inputs
self.last_timestep = timestep
exploit_action = action_distribution.deterministic_sample()
batch_size = q_values.size()[0]
action_logp = torch.zeros(batch_size, dtype=torch.float)
# Explore.
if explore:
# Get the current epsilon.
epsilon = self.epsilon_schedule(self.last_timestep)
if isinstance(action_distribution, TorchMultiActionDistribution):
exploit_action = tree.flatten(exploit_action)
for i in range(batch_size):
if random.random() < epsilon:
# TODO: (bcahlit) Mask out actions
random_action = tree.flatten(self.action_space.sample())
for j in range(len(exploit_action)):
exploit_action[j][i] = torch.tensor(random_action[j])
exploit_action = tree.unflatten_as(
action_distribution.action_space_struct, exploit_action
)
return exploit_action, action_logp
else:
# Mask out actions, whose Q-values are -inf, so that we don't
# even consider them for exploration.
random_valid_action_logits = torch.where(
q_values <= FLOAT_MIN,
torch.ones_like(q_values) * 0.0,
torch.ones_like(q_values),
)
# A random action.
random_actions = torch.squeeze(
torch.multinomial(random_valid_action_logits, 1), axis=1
)
# Pick either random or greedy.
action = torch.where(
torch.empty((batch_size,)).uniform_().to(self.device) < epsilon,
random_actions,
exploit_action,
)
return action, action_logp
# Return the deterministic "sample" (argmax) over the logits.
else:
return exploit_action, action_logp
@override(Exploration)
def get_state(self, sess: Optional["tf.Session"] = None):
if sess:
return sess.run(self._tf_state_op)
eps = self.epsilon_schedule(self.last_timestep)
return {
"cur_epsilon": convert_to_numpy(eps) if self.framework != "tf" else eps,
"last_timestep": convert_to_numpy(self.last_timestep)
if self.framework != "tf"
else self.last_timestep,
}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
if self.framework == "tf":
self.last_timestep.load(state["last_timestep"], session=sess)
elif isinstance(self.last_timestep, int):
self.last_timestep = state["last_timestep"]
else:
self.last_timestep.assign(state["last_timestep"])
|
EpsilonGreedy
|
python
|
wandb__wandb
|
wandb/automations/_generated/delete_automation.py
|
{
"start": 153,
"end": 225
}
|
class ____(GQLResult):
result: DeleteAutomationResult
|
DeleteAutomation
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_leap_year.py
|
{
"start": 1629,
"end": 3865
}
|
class ____(ColumnMapExpectation):
"""Expect column values to be a valid leap year."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_leap_year": [
"4",
"400",
"404",
"1996",
"2000",
],
"malformed_leap_year": [
"",
"1994",
"1997",
"2022",
"This is not a valid Leap Year",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_leap_year"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_leap_year"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_leap_year"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidLeapYear().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidLeapYear
|
python
|
google__python-fire
|
fire/console/platforms.py
|
{
"start": 1595,
"end": 4934
}
|
class ____(object):
"""An enum representing the operating system you are running on."""
class _OS(object):
"""A single operating system."""
# pylint: disable=redefined-builtin
def __init__(self, id, name, file_name):
self.id = id
self.name = name
self.file_name = file_name
def __str__(self):
return self.id
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.id == other.id and
self.name == other.name and
self.file_name == other.file_name)
def __hash__(self):
return hash(self.id) + hash(self.name) + hash(self.file_name)
def __ne__(self, other):
return not self == other
@classmethod
def _CmpHelper(cls, x, y):
"""Just a helper equivalent to the cmp() function in Python 2."""
return (x > y) - (x < y)
def __lt__(self, other):
return self._CmpHelper(
(self.id, self.name, self.file_name),
(other.id, other.name, other.file_name)) < 0
def __gt__(self, other):
return self._CmpHelper(
(self.id, self.name, self.file_name),
(other.id, other.name, other.file_name)) > 0
def __le__(self, other):
return not self.__gt__(other)
def __ge__(self, other):
return not self.__lt__(other)
WINDOWS = _OS('WINDOWS', 'Windows', 'windows')
MACOSX = _OS('MACOSX', 'Mac OS X', 'darwin')
LINUX = _OS('LINUX', 'Linux', 'linux')
CYGWIN = _OS('CYGWIN', 'Cygwin', 'cygwin')
MSYS = _OS('MSYS', 'Msys', 'msys')
_ALL = [WINDOWS, MACOSX, LINUX, CYGWIN, MSYS]
@staticmethod
def AllValues():
"""Gets all possible enum values.
Returns:
list, All the enum values.
"""
return list(OperatingSystem._ALL)
@staticmethod
def FromId(os_id, error_on_unknown=True):
"""Gets the enum corresponding to the given operating system id.
Args:
os_id: str, The operating system id to parse
error_on_unknown: bool, True to raise an exception if the id is unknown,
False to just return None.
Raises:
InvalidEnumValue: If the given value cannot be parsed.
Returns:
OperatingSystemTuple, One of the OperatingSystem constants or None if the
input is None.
"""
if not os_id:
return None
for operating_system in OperatingSystem._ALL:
if operating_system.id == os_id:
return operating_system
if error_on_unknown:
raise InvalidEnumValue(os_id, 'Operating System',
[value.id for value in OperatingSystem._ALL])
return None
@staticmethod
def Current():
"""Determines the current operating system.
Returns:
OperatingSystemTuple, One of the OperatingSystem constants or None if it
cannot be determined.
"""
if os.name == 'nt':
return OperatingSystem.WINDOWS
elif 'linux' in sys.platform:
return OperatingSystem.LINUX
elif 'darwin' in sys.platform:
return OperatingSystem.MACOSX
elif 'cygwin' in sys.platform:
return OperatingSystem.CYGWIN
elif 'msys' in sys.platform:
return OperatingSystem.MSYS
return None
@staticmethod
def IsWindows():
"""Returns True if the current operating system is Windows."""
return OperatingSystem.Current() is OperatingSystem.WINDOWS
|
OperatingSystem
|
python
|
boto__boto3
|
boto3/resources/model.py
|
{
"start": 2415,
"end": 3108
}
|
class ____:
"""
An item which has parameters exposed via the ``params`` property.
A request has an operation and parameters, while a waiter has
a name, a low-level waiter name and parameters.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
self._definition = definition
@property
def params(self):
"""
Get a list of auto-filled parameters for this request.
:type: list(:py:class:`Parameter`)
"""
params = []
for item in self._definition.get('params', []):
params.append(Parameter(**item))
return params
|
DefinitionWithParams
|
python
|
getsentry__sentry
|
src/sentry/migrations/0913_split_discover_dataset_dashboards_self_hosted.py
|
{
"start": 1544,
"end": 2481
}
|
class ____(Enum):
"""
Ambiguous queries that haven't been or couldn't be categorized into a
specific dataset.
"""
UNKNOWN = 0
"""
Dataset inferred by either running the query or using heuristics.
"""
INFERRED = 1
"""
Canonical dataset, user explicitly selected it.
"""
USER = 2
"""
Was an ambiguous dataset forced to split (i.e. we picked a default)
"""
FORCED = 3
"""
Dataset inferred by split script, version 1
"""
SPLIT_VERSION_1 = 4
"""
Dataset inferred by split script, version 2
"""
SPLIT_VERSION_2 = 5
@classmethod
def as_choices(cls) -> tuple[tuple[int, str], ...]:
return tuple((source.value, source.name.lower()) for source in cls)
@classmethod
def as_text_choices(cls) -> tuple[tuple[str, int], ...]:
return tuple((source.name.lower(), source.value) for source in cls)
|
DatasetSourcesTypes
|
python
|
getsentry__sentry
|
src/sentry/models/groupopenperiodactivity.py
|
{
"start": 256,
"end": 575
}
|
class ____(IntEnum):
OPENED = 1
STATUS_CHANGE = 2
CLOSED = 3
def to_str(self) -> str:
"""
Return the string representation of the activity type.
"""
return self.name.lower()
def generate_random_uuid() -> UUID:
return uuid4()
@region_silo_model
|
OpenPeriodActivityType
|
python
|
numba__llvmlite
|
llvmlite/ir/instructions.py
|
{
"start": 30750,
"end": 31551
}
|
class ____(Instruction):
def __init__(self, parent, typ, name='', cleanup=False):
super(LandingPadInstr, self).__init__(parent, typ, "landingpad", [],
name=name)
self.cleanup = cleanup
self.clauses = []
def add_clause(self, clause):
assert isinstance(clause, _LandingPadClause)
self.clauses.append(clause)
def descr(self, buf):
fmt = "landingpad {type}{cleanup}{clauses}\n"
buf.append(fmt.format(type=self.type,
cleanup=' cleanup' if self.cleanup else '',
clauses=''.join(["\n {0}".format(clause)
for clause in self.clauses]),
))
|
LandingPadInstr
|
python
|
mlflow__mlflow
|
mlflow/utils/file_utils.py
|
{
"start": 31634,
"end": 32672
}
|
class ____:
"""
Exclusive file lock (only works on Unix system)
"""
def __init__(self, path: str):
if os.name == "nt":
raise MlflowException("ExclusiveFileLock class does not support Windows system.")
self.path = path
self.fd = None
def __enter__(self) -> None:
# Python on Windows does not have `fcntl` module, so importing it lazily.
import fcntl # clint: disable=lazy-builtin-import
# Open file (create if missing)
self.fd = open(self.path, "w")
# Acquire exclusive lock (blocking)
fcntl.flock(self.fd, fcntl.LOCK_EX)
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
):
# Python on Windows does not have `fcntl` module, so importing it lazily.
import fcntl # clint: disable=lazy-builtin-import
# Release lock
fcntl.flock(self.fd, fcntl.LOCK_UN)
self.fd.close()
|
ExclusiveFileLock
|
python
|
numba__llvmlite
|
llvmlite/tests/test_binding.py
|
{
"start": 63205,
"end": 72996
}
|
class ____(BaseTest):
def test_str(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(str(glob.global_value_type), "i32")
glob_struct_type = mod.get_struct_type("struct.glob_type")
self.assertEqual(str(glob_struct_type),
"%struct.glob_type = type { i64, [2 x i64] }")
elements = list(glob_struct_type.elements)
self.assertEqual(len(elements), 2)
self.assertEqual(str(elements[0]), "i64")
self.assertEqual(str(elements[1]), "[2 x i64]")
def test_type_kind(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(glob.type.type_kind, llvm.TypeKind.pointer)
self.assertTrue(glob.type.is_pointer)
glob_struct = mod.get_global_variable("glob_struct")
self.assertEqual(glob_struct.type.type_kind, llvm.TypeKind.pointer)
self.assertTrue(glob_struct.type.is_pointer)
stype = glob_struct.global_value_type
self.assertEqual(stype.type_kind, llvm.TypeKind.struct)
self.assertTrue(stype.is_struct)
stype_a, stype_b = stype.elements
self.assertEqual(stype_a.type_kind, llvm.TypeKind.integer)
self.assertEqual(stype_b.type_kind, llvm.TypeKind.array)
self.assertTrue(stype_b.is_array)
glob_vec_struct_type = mod.get_struct_type("struct.glob_type_vec")
_, vector_type = glob_vec_struct_type.elements
self.assertEqual(vector_type.type_kind, llvm.TypeKind.vector)
self.assertTrue(vector_type.is_vector)
funcptr = mod.get_function("sum").type
self.assertEqual(funcptr.type_kind, llvm.TypeKind.pointer)
functype = mod.get_function("sum").global_value_type
self.assertEqual(functype.type_kind, llvm.TypeKind.function)
def test_element_count(self):
mod = self.module()
glob_struct_type = mod.get_struct_type("struct.glob_type")
_, array_type = glob_struct_type.elements
self.assertEqual(array_type.element_count, 2)
with self.assertRaises(ValueError):
glob_struct_type.element_count
def test_type_width(self):
mod = self.module()
glob_struct_type = mod.get_struct_type("struct.glob_type")
glob_vec_struct_type = mod.get_struct_type("struct.glob_type_vec")
integer_type, array_type = glob_struct_type.elements
_, vector_type = glob_vec_struct_type.elements
self.assertEqual(integer_type.type_width, 64)
self.assertEqual(vector_type.type_width, 64 * 2)
# Structs and arrays are not primitive types
self.assertEqual(glob_struct_type.type_width, 0)
self.assertEqual(array_type.type_width, 0)
def test_vararg_function(self):
# Variadic function
mod = self.module(asm_vararg_declare)
func = mod.get_function('vararg')
decltype = func.global_value_type
self.assertTrue(decltype.is_function_vararg)
mod = self.module(asm_sum_declare)
func = mod.get_function('sum')
decltype = func.global_value_type
self.assertFalse(decltype.is_function_vararg)
# test that the function pointer type cannot use is_function_vararg
self.assertTrue(func.type.is_pointer)
with self.assertRaises(ValueError) as raises:
func.type.is_function_vararg
self.assertIn("Type ptr is not a function", str(raises.exception))
def test_function_typeref_as_ir(self):
mod = self.module()
[fn] = list(mod.functions)
# .type gives a pointer type, a problem if it's opaque (llvm15+)
self.assertEqual(fn.type.type_kind, llvm.TypeKind.pointer)
self.assertFalse(fn.type.is_function)
# Use .global_value_type instead
fnty = fn.global_value_type
self.assertEqual(fnty.type_kind, llvm.TypeKind.function)
self.assertTrue(fnty.is_function)
# Run .as_ir() to get llvmlite.ir.FunctionType
tyir = fnty.as_ir(ir.global_context)
self.assertIsInstance(tyir, ir.FunctionType)
self.assertEqual(tyir.args, (ir.IntType(32), ir.IntType(32)))
self.assertEqual(tyir.return_type ,ir.IntType(32))
def test_void_typeref_as_ir(self):
# Void type can only be used as return-type of llvmlite.ir.FunctionType.
fnty = ir.FunctionType(ir.VoidType(), ())
irmod = ir.Module()
fn = ir.Function(irmod, fnty, "foo")
mod = self.module(str(irmod))
fn = mod.get_function("foo")
gvty = fn.global_value_type
self.assertEqual(fnty.return_type,
gvty.as_ir(ir.global_context).return_type)
def test_global_typeref_as_ir(self):
from llvmlite.binding.typeref import _TypeKindToIRType
ctx = ir.Context()
skipped = {
"function", # tested in test_function_typeref_as_ir
"void", # tested in test_void_typeref_as_ir
}
makers = {}
def maker_half():
yield ir.HalfType()
makers['half'] = maker_half
def maker_float():
yield ir.FloatType()
makers['float'] = maker_float
def maker_double():
yield ir.DoubleType()
makers['double'] = maker_double
def maker_integer():
yield ir.IntType(32)
makers['integer'] = maker_integer
def maker_pointer():
yield ir.PointerType(ir.IntType(8))
# opaque struct ptr
yield ctx.get_identified_type("myclass").as_pointer()
# named struct with defined body
myclass2 = ctx.get_identified_type("myclass2")
myclass2.set_body(ir.IntType(8))
yield myclass2.as_pointer()
makers['pointer'] = maker_pointer
def maker_array():
yield ir.ArrayType(ir.IntType(8), 123)
makers['array'] = maker_array
def maker_vector():
yield ir.VectorType(ir.FloatType(), 2)
makers['vector'] = maker_vector
def maker_struct():
yield ir.LiteralStructType([ir.FloatType(), ir.IntType(64)])
yield ir.LiteralStructType([ir.FloatType(), ir.IntType(64)],
packed=True)
makers['struct'] = maker_struct
# Ensure that number of supported TypeKind matches number of makers
self.assertEqual({x.name for x in _TypeKindToIRType.keys()},
set(makers.keys()) | set(skipped))
# Test each type-kind
for type_kind, irtype in _TypeKindToIRType.items():
if type_kind.name in skipped:
continue
for ty in makers[type_kind.name]():
with self.subTest(f"{type_kind!s} -> {ty}"):
irmod = ir.Module(context=ctx)
ir.GlobalVariable(irmod, ty, name='gv')
asm = str(irmod)
mod = llvm.parse_assembly(asm)
gv = mod.get_global_variable("gv")
gvty = gv.global_value_type
got = gvty.as_ir(ir.Context()) # fresh context
self.assertEqual(got, ty)
self.assertIsInstance(got, irtype)
def _check_typeref_as_ir_for_wrappers(self, asm, target_symbol):
# Get a clang++ defined function from a llvm ir
mod = llvm.parse_assembly(asm)
cppfn = mod.get_function(target_symbol)
cppfntype = cppfn.global_value_type
# Get the function type into a new context
my_context = ir.Context() # don't populate global context
ty = cppfntype.as_ir(ir_ctx=my_context)
# Build a wrapper module for the cpp function
wrapper_mod = ir.Module(context=my_context)
# declare the original function
declfn = ir.Function(wrapper_mod, ty, name=cppfn.name)
# populate the wrapper function
wrapfn = ir.Function(wrapper_mod, ty, name="wrapper")
builder = ir.IRBuilder(wrapfn.append_basic_block())
# just call the original function
builder.call(declfn, wrapfn.args)
builder.ret_void()
# Create a new LLVM module with the wrapper
new_mod = llvm.parse_assembly(str(wrapper_mod))
self.assertTrue(new_mod.get_function(declfn.name).is_declaration,
msg="declfn must not have a body")
# Merge/link the original module into the new module
new_mod.link_in(mod, preserve=True)
self.assertEqual(len(list(new_mod.functions)),
len(list(mod.functions)) + 1,
msg="the only new function is the wrapper")
self.assertFalse(new_mod.get_function(declfn.name).is_declaration,
msg="declfn must have a body now")
self.assertEqual(new_mod.get_function(declfn.name).global_value_type,
new_mod.get_function(wrapfn.name).global_value_type,
msg="declfn and wrapfn must have the same llvm Type")
def test_typeref_as_ir_for_wrappers_of_cpp_class(self):
"""Exercise extracting C++ defined class types.
Contains both opaque and non-opaque class definitions.
"""
self._check_typeref_as_ir_for_wrappers(
asm_cpp_class,
"_Z3fooP7MyClass14MyClassDefined",
)
def test_typeref_as_ir_for_wrappers_of_cpp_vector_struct(self):
"""Exercise extracting C++ struct types that are passed as vectors.
IA64 ABI on x86_64 will put struct with two floats as
a vector of two floats.
"""
self._check_typeref_as_ir_for_wrappers(
asm_cpp_vector,
"_Z3foo8Vector2DPS_",
)
|
TestTypeRef
|
python
|
sanic-org__sanic
|
sanic/models/futures.py
|
{
"start": 1414,
"end": 1568
}
|
class ____(NamedTuple):
handler: SignalHandler
event: str
condition: Optional[dict[str, str]]
exclusive: bool
priority: int
|
FutureSignal
|
python
|
PyCQA__pylint
|
tests/functional/u/use/use_implicit_booleaness_not_comparison.py
|
{
"start": 2902,
"end": 4845
}
|
class ____:
lst = []
@staticmethod
def test(b=1):
print(b)
return []
if A.lst == []: # [use-implicit-booleaness-not-comparison]
pass
if [] == A.lst: # [use-implicit-booleaness-not-comparison]
pass
if A.test("b") == []: # [use-implicit-booleaness-not-comparison]
pass
def test_function():
return []
if test_function() == []: # [use-implicit-booleaness-not-comparison]
pass
# pylint: disable=import-outside-toplevel, wrong-import-position, import-error
# Numpy has its own implementation of __bool__, but base class has list, that's why the comparison check is happening
import numpy
numpy_array = numpy.array([0])
if numpy_array == []: # [use-implicit-booleaness-not-comparison]
print('numpy_array')
if numpy_array != []: # [use-implicit-booleaness-not-comparison]
print('numpy_array')
if numpy_array >= (): # [use-implicit-booleaness-not-comparison]
print('b')
# pandas has its own implementations of __bool__ and is not subclass of list, dict, or tuple; that's why comparison check is not happening
import pandas as pd
pandas_df = pd.DataFrame()
if pandas_df == []:
pass
if pandas_df != ():
pass
if pandas_df <= []:
print("don't emit warning if variable can't safely be inferred")
from typing import Union
from random import random
var: Union[dict, bool, None] = {}
if random() > 0.5:
var = True
if var == {}:
pass
data = {}
if data == {}: # [use-implicit-booleaness-not-comparison]
print("This will be printed")
if data != {}: # [use-implicit-booleaness-not-comparison]
print("This will also be printed")
if data or not data:
print("This however won't be")
# literal string check
long_test = {}
if long_test == { }: # [use-implicit-booleaness-not-comparison]
pass
# Check for properties and uninferable class methods
# See https://github.com/pylint-dev/pylint/issues/5646
from xyz import AnotherClassWithProperty
|
A
|
python
|
google__flatbuffers
|
tests/monster_test_generated.py
|
{
"start": 16809,
"end": 17709
}
|
class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 20
# StructOfStructsOfStructs
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StructOfStructsOfStructs
def A(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 0)
return obj
def CreateStructOfStructsOfStructs(builder, a_a_id, a_a_distance, a_b_a, a_b_b, a_c_id, a_c_distance):
builder.Prep(4, 20)
builder.Prep(4, 20)
builder.Prep(4, 8)
builder.PrependUint32(a_c_distance)
builder.PrependUint32(a_c_id)
builder.Prep(2, 4)
builder.Pad(1)
builder.PrependInt8(a_b_b)
builder.PrependInt16(a_b_a)
builder.Prep(4, 8)
builder.PrependUint32(a_a_distance)
builder.PrependUint32(a_a_id)
return builder.Offset()
try:
from typing import Optional
except:
pass
|
StructOfStructsOfStructs
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/metrics/metrics_portable.py
|
{
"start": 1827,
"end": 2048
}
|
class ____(TFLiteMetrics):
"""Similar to TFLiteMetrics but specialized for converter."""
def __del__(self):
pass
def set_export_required(self):
pass
def export_metrics(self):
pass
|
TFLiteConverterMetrics
|
python
|
pyinstaller__pyinstaller
|
tests/unit/test_modulegraph/test_imports.py
|
{
"start": 16725,
"end": 17752
}
|
class ____ (unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, value, types):
if not isinstance(value, types):
self.fail("%r is not an instance of %r"%(value, types))
def setUp(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-regr4')
self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
self.mf.add_script(os.path.join(root, 'script.py'))
def testRegr1(self):
node = self.mf.find_node('pkg.core')
self.assertIsInstance(node, modulegraph.Package)
node = self.mf.find_node('pkg.core.callables')
self.assertIsInstance(node, modulegraph.SourceModule)
node = self.mf.find_node('pkg.core.listener')
self.assertIsInstance(node, modulegraph.SourceModule)
node = self.mf.find_node('pkg.core.listenerimpl')
self.assertIsInstance(node, modulegraph.SourceModule)
|
TestRegression4
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
|
{
"start": 9263,
"end": 9367
}
|
class ____(IncrementalShopifyGraphQlBulkStream):
bulk_query: DiscountCode = DiscountCode
|
DiscountCodes
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_plugin_details.py
|
{
"start": 2056,
"end": 2742
}
|
class ____(ProjectPluginDetailsTestBase):
method = "put"
def test_simple(self) -> None:
with outbox_runner():
self.get_success_response(
self.project.organization.slug,
self.project.slug,
"webhooks",
**{"urls": "http://example.com/foo"},
)
with assume_test_silo_mode(SiloMode.CONTROL):
audit = AuditLogEntry.objects.get(target_object=self.project.id)
assert audit.event == 111
assert (
ProjectOption.objects.get(key="webhooks:urls", project=self.project).value
== "http://example.com/foo"
)
|
UpdateProjectPluginTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/events.py
|
{
"start": 56384,
"end": 57485
}
|
class ____(Request):
"""
Clear an open Scroll ID
:param scroll_id: Scroll ID as returned by previous events service calls
:type scroll_id: str
"""
_service = "events"
_action = "clear_scroll"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"scroll_id": {
"description": "Scroll ID as returned by previous events service calls",
"type": "string",
}
},
"required": ["scroll_id"],
"type": "object",
}
def __init__(self, scroll_id: str, **kwargs: Any) -> None:
super(ClearScrollRequest, self).__init__(**kwargs)
self.scroll_id = scroll_id
@schema_property("scroll_id")
def scroll_id(self) -> str:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: str) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
|
ClearScrollRequest
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_validators.py
|
{
"start": 551,
"end": 15081
}
|
class ____(_fixtures.FixtureTest):
def test_scalar(self):
users = self.tables.users
canary = Mock()
class User(ComparableEntity):
@validates("name")
def validate_name(self, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(name="ed")
eq_(u1.name, "ed modified")
assert_raises(AssertionError, setattr, u1, "name", "fred")
eq_(u1.name, "ed modified")
eq_(canary.mock_calls, [call("name", "ed"), call("name", "fred")])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="ed modified").one(),
User(name="ed"),
)
def test_collection(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(ComparableEntity):
@validates("addresses")
def validate_address(self, key, ad):
canary(key, ad)
assert "@" in ad.email_address
return ad
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="edward")
a0 = Address(email_address="noemail")
assert_raises(AssertionError, u1.addresses.append, a0)
a1 = Address(id=15, email_address="foo@bar.com")
u1.addresses.append(a1)
eq_(canary.mock_calls, [call("addresses", a0), call("addresses", a1)])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="edward").one(),
User(
name="edward", addresses=[Address(email_address="foo@bar.com")]
),
)
def test_validators_dict(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(ComparableEntity):
@validates("name")
def validate_name(self, key, name):
ne_(name, "fred")
return name + " modified"
@validates("addresses")
def validate_address(self, key, ad):
assert "@" in ad.email_address
return ad
def simple_function(self, key, value):
return key, value
u_m = self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
eq_(
{k: v[0].__name__ for k, v in list(u_m.validators.items())},
{"name": "validate_name", "addresses": "validate_address"},
)
def test_validator_w_removes(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(ComparableEntity):
@validates("name", include_removes=True)
def validate_name(self, key, item, remove):
canary(key, item, remove)
return item
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.name = "ed"
u1.name = "mary"
del u1.name
a1, a2, a3 = Address(), Address(), Address()
u1.addresses.append(a1)
u1.addresses.remove(a1)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3]
eq_(
canary.mock_calls,
[
call("name", "ed", False),
call("name", "mary", False),
call("name", "mary", True),
# append a1
call("addresses", a1, False),
# remove a1
call("addresses", a1, True),
# set to [a1, a2] - this is two appends
call("addresses", a1, False),
call("addresses", a2, False),
# set to [a2, a3] - this is a remove of a1,
# append of a3. the appends are first.
# in 1.2 due to #3896, we also get 'a2' in the
# validates as it is part of the set
call("addresses", a2, False),
call("addresses", a3, False),
call("addresses", a1, True),
],
)
def test_validator_bulk_collection_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append("e1")
u1.addresses.append("e2")
eq_(
u1.addresses,
[Address(email_address="e1"), Address(email_address="e2")],
)
u1.addresses = ["e3", "e4"]
eq_(
u1.addresses,
[Address(email_address="e3"), Address(email_address="e4")],
)
def test_validator_bulk_dict_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=collections.attribute_keyed_dict(
"email_address"
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses["e1"] = "e1"
u1.addresses["e2"] = "e2"
eq_(
u1.addresses,
{
"e1": Address(email_address="e1"),
"e2": Address(email_address="e2"),
},
)
u1.addresses = {"e3": "e3", "e4": "e4"}
eq_(
u1.addresses,
{
"e3": Address(email_address="e3"),
"e4": Address(email_address="e4"),
},
)
def test_validator_as_callable_object(self):
"""test #6538"""
users = self.tables.users
canary = Mock()
class SomeValidator:
def __call__(self, obj, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
class User(ComparableEntity):
sv = validates("name")(SomeValidator())
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
eq_(u1.name, "ed modified")
def test_validator_multi_warning(self):
users = self.tables.users
class Foo:
@validates("name")
def validate_one(self, key, value):
pass
@validates("name")
def validate_two(self, key, value):
pass
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Foo|users already exists",
self.mapper_registry.map_imperatively,
Foo,
users,
)
class Bar:
@validates("id")
def validate_three(self, key, value):
return value + 10
@validates("id", "name")
def validate_four(self, key, value):
return value + "foo"
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Bar|users already exists",
self.mapper_registry.map_imperatively,
Bar,
users,
)
@testing.variation("include_backrefs", [True, False, "default"])
@testing.variation("include_removes", [True, False, "default"])
def test_validator_backrefs(self, include_backrefs, include_removes):
users, addresses = (self.tables.users, self.tables.addresses)
canary = Mock()
need_remove_param = (
bool(include_removes) and not include_removes.default
)
validate_kw = {}
if not include_removes.default:
validate_kw["include_removes"] = bool(include_removes)
if not include_backrefs.default:
validate_kw["include_backrefs"] = bool(include_backrefs)
expect_include_backrefs = include_backrefs.default or bool(
include_backrefs
)
expect_include_removes = (
bool(include_removes) and not include_removes.default
)
class User(ComparableEntity):
if need_remove_param:
@validates("addresses", **validate_kw)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates("addresses", **validate_kw)
def validate_address(self, key, item):
canary(key, item)
return item
class Address(ComparableEntity):
if need_remove_param:
@validates("user", **validate_kw)
def validate_user(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates("user", **validate_kw)
def validate_user(self, key, item):
canary(key, item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u2 = User()
a1, a2 = Address(), Address()
# 3 append/set, two removes
u1.addresses.append(a1)
u1.addresses.append(a2)
a2.user = u2
del a1.user
u2.addresses.remove(a2)
# copy, so that generation of the
# comparisons don't get caught
calls = list(canary.mock_calls)
if expect_include_backrefs:
if expect_include_removes:
eq_(
calls,
[
# append #1
call("addresses", Address(), False),
# backref for append
call("user", User(addresses=[]), False),
# append #2
call("addresses", Address(user=None), False),
# backref for append
call("user", User(addresses=[]), False),
# assign a2.user = u2
call("user", User(addresses=[]), False),
# backref for u1.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for u2.addresses.append(a2)
call("addresses", Address(user=None), False),
# del a1.user
call("user", User(addresses=[]), True),
# backref for u1.addresses.remove(a1)
call("addresses", Address(), True),
# u2.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for a2.user = None
call("user", None, False),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", None),
],
)
else:
if expect_include_removes:
eq_(
calls,
[
call("addresses", Address(), False),
call("addresses", Address(user=None), False),
call("user", User(addresses=[]), False),
call("user", User(addresses=[]), True),
call("addresses", Address(user=None), True),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
],
)
|
ValidatorTest
|
python
|
pydata__xarray
|
xarray/tests/test_datatree.py
|
{
"start": 74212,
"end": 78130
}
|
class ____:
def test_isel_siblings(self) -> None:
tree = DataTree.from_dict(
{
"/first": xr.Dataset({"a": ("x", [1, 2])}),
"/second": xr.Dataset({"b": ("x", [1, 2, 3])}),
}
)
expected = DataTree.from_dict(
{
"/first": xr.Dataset({"a": 2}),
"/second": xr.Dataset({"b": 3}),
}
)
actual = tree.isel(x=-1)
assert_identical(actual, expected)
expected = DataTree.from_dict(
{
"/first": xr.Dataset({"a": ("x", [1])}),
"/second": xr.Dataset({"b": ("x", [1])}),
}
)
actual = tree.isel(x=slice(1))
assert_identical(actual, expected)
actual = tree.isel(x=[0])
assert_identical(actual, expected)
actual = tree.isel(x=slice(None))
assert_identical(actual, tree)
def test_isel_inherited(self) -> None:
tree = DataTree.from_dict(
{
"/": xr.Dataset(coords={"x": [1, 2]}),
"/child": xr.Dataset({"foo": ("x", [3, 4])}),
}
)
expected = DataTree.from_dict(
{
"/": xr.Dataset(coords={"x": 2}),
"/child": xr.Dataset({"foo": 4}),
}
)
actual = tree.isel(x=-1)
assert_identical(actual, expected)
expected = DataTree.from_dict(
{
"/child": xr.Dataset({"foo": 4}),
}
)
actual = tree.isel(x=-1, drop=True)
assert_identical(actual, expected)
expected = DataTree.from_dict(
{
"/": xr.Dataset(coords={"x": [1]}),
"/child": xr.Dataset({"foo": ("x", [3])}),
}
)
actual = tree.isel(x=[0])
assert_identical(actual, expected)
actual = tree.isel(x=slice(None))
# TODO: re-enable after the fix to copy() from #9628 is submitted
# actual = tree.children["child"].isel(x=slice(None))
# expected = tree.children["child"].copy()
# assert_identical(actual, expected)
actual = tree.children["child"].isel(x=0)
expected = DataTree(
dataset=xr.Dataset({"foo": 3}, coords={"x": 1}),
name="child",
)
assert_identical(actual, expected)
def test_sel(self) -> None:
tree = DataTree.from_dict(
{
"/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}),
"/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}),
}
)
expected = DataTree.from_dict(
{
"/first": xr.Dataset({"a": 2}, coords={"x": 2}),
"/second": xr.Dataset({"b": 4}, coords={"x": 2}),
}
)
actual = tree.sel(x=2)
assert_identical(actual, expected)
actual = tree.children["first"].sel(x=2)
expected = DataTree(
dataset=xr.Dataset({"a": 2}, coords={"x": 2}),
name="first",
)
assert_identical(actual, expected)
def test_sel_isel_error_has_node_info(self) -> None:
tree = DataTree.from_dict(
{
"/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}),
"/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}),
}
)
with pytest.raises(
KeyError,
match=re.escape(
"Raised whilst mapping function over node(s) with path 'second'"
),
):
tree.sel(x=1)
with pytest.raises(
IndexError,
match=re.escape(
"Raised whilst mapping function over node(s) with path 'first'"
),
):
tree.isel(x=4)
|
TestIndexing
|
python
|
python__mypy
|
mypy/plugins/attrs.py
|
{
"start": 39102,
"end": 46406
}
|
class ____:
"""Helper to add methods to a TypeInfo.
ctx: The ClassDefCtx we are using on which we will add methods.
"""
# TODO: Combine this with the code build_namedtuple_typeinfo to support both.
def __init__(self, ctx: mypy.plugin.ClassDefContext) -> None:
self.ctx = ctx
self.self_type = fill_typevars(ctx.cls.info)
def add_method(
self,
method_name: str,
args: list[Argument],
ret_type: Type,
self_type: Type | None = None,
tvd: TypeVarType | None = None,
) -> None:
"""Add a method: def <method_name>(self, <args>) -> <ret_type>): ... to info.
self_type: The type to use for the self argument or None to use the inferred self type.
tvd: If the method is generic these should be the type variables.
"""
self_type = self_type if self_type is not None else self.self_type
add_method_to_class(
self.ctx.api, self.ctx.cls, method_name, args, ret_type, self_type, tvd
)
def _get_attrs_init_type(typ: Instance) -> CallableType | None:
"""
If `typ` refers to an attrs class, get the type of its initializer method.
"""
magic_attr = typ.type.get(MAGIC_ATTR_NAME)
if magic_attr is None or not magic_attr.plugin_generated:
return None
init_method = typ.type.get_method("__init__") or typ.type.get_method(ATTRS_INIT_NAME)
if not isinstance(init_method, FuncDef) or not isinstance(init_method.type, CallableType):
return None
return init_method.type
def _fail_not_attrs_class(ctx: mypy.plugin.FunctionSigContext, t: Type, parent_t: Type) -> None:
t_name = format_type_bare(t, ctx.api.options)
if parent_t is t:
msg = (
f'Argument 1 to "evolve" has a variable type "{t_name}" not bound to an attrs class'
if isinstance(t, TypeVarType)
else f'Argument 1 to "evolve" has incompatible type "{t_name}"; expected an attrs class'
)
else:
pt_name = format_type_bare(parent_t, ctx.api.options)
msg = (
f'Argument 1 to "evolve" has type "{pt_name}" whose item "{t_name}" is not bound to an attrs class'
if isinstance(t, TypeVarType)
else f'Argument 1 to "evolve" has incompatible type "{pt_name}" whose item "{t_name}" is not an attrs class'
)
ctx.api.fail(msg, ctx.context)
def _get_expanded_attr_types(
ctx: mypy.plugin.FunctionSigContext,
typ: ProperType,
display_typ: ProperType,
parent_typ: ProperType,
) -> list[Mapping[str, Type]] | None:
"""
For a given type, determine what attrs classes it can be: for each class, return the field types.
For generic classes, the field types are expanded.
If the type contains Any or a non-attrs type, returns None; in the latter case, also reports an error.
"""
if isinstance(typ, AnyType):
return None
elif isinstance(typ, UnionType):
ret: list[Mapping[str, Type]] | None = []
for item in typ.relevant_items():
item = get_proper_type(item)
item_types = _get_expanded_attr_types(ctx, item, item, parent_typ)
if ret is not None and item_types is not None:
ret += item_types
else:
ret = None # but keep iterating to emit all errors
return ret
elif isinstance(typ, TypeVarType):
return _get_expanded_attr_types(
ctx, get_proper_type(typ.upper_bound), display_typ, parent_typ
)
elif isinstance(typ, Instance):
init_func = _get_attrs_init_type(typ)
if init_func is None:
_fail_not_attrs_class(ctx, display_typ, parent_typ)
return None
init_func = expand_type_by_instance(init_func, typ)
# [1:] to skip the self argument of AttrClass.__init__
field_names = cast(list[str], init_func.arg_names[1:])
field_types = init_func.arg_types[1:]
return [dict(zip(field_names, field_types))]
else:
_fail_not_attrs_class(ctx, display_typ, parent_typ)
return None
def _meet_fields(types: list[Mapping[str, Type]]) -> Mapping[str, Type]:
"""
"Meet" the fields of a list of attrs classes, i.e. for each field, its new type will be the lower bound.
"""
field_to_types = defaultdict(list)
for fields in types:
for name, typ in fields.items():
field_to_types[name].append(typ)
return {
name: (
get_proper_type(reduce(meet_types, f_types))
if len(f_types) == len(types)
else UninhabitedType()
)
for name, f_types in field_to_types.items()
}
def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType:
"""
Generate a signature for the 'attr.evolve' function that's specific to the call site
and dependent on the type of the first argument.
"""
if len(ctx.args) != 2:
# Ideally the name and context should be callee's, but we don't have it in FunctionSigContext.
ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context)
return ctx.default_signature
if len(ctx.args[0]) != 1:
return ctx.default_signature # leave it to the type checker to complain
inst_arg = ctx.args[0][0]
inst_type = get_proper_type(ctx.api.get_expression_type(inst_arg))
inst_type_str = format_type_bare(inst_type, ctx.api.options)
attr_types = _get_expanded_attr_types(ctx, inst_type, inst_type, inst_type)
if attr_types is None:
return ctx.default_signature
fields = _meet_fields(attr_types)
return CallableType(
arg_names=["inst", *fields.keys()],
arg_kinds=[ARG_POS] + [ARG_NAMED_OPT] * len(fields),
arg_types=[inst_type, *fields.values()],
ret_type=inst_type,
fallback=ctx.default_signature.fallback,
name=f"{ctx.default_signature.name} of {inst_type_str}",
)
def fields_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType:
"""Provide the signature for `attrs.fields`."""
if len(ctx.args) != 1 or len(ctx.args[0]) != 1:
return ctx.default_signature
proper_type = get_proper_type(ctx.api.get_expression_type(ctx.args[0][0]))
# fields(Any) -> Any, fields(type[Any]) -> Any
if (
isinstance(proper_type, AnyType)
or isinstance(proper_type, TypeType)
and isinstance(proper_type.item, AnyType)
):
return ctx.default_signature
cls = None
arg_types = ctx.default_signature.arg_types
if isinstance(proper_type, TypeVarType):
inner = get_proper_type(proper_type.upper_bound)
if isinstance(inner, Instance):
# We need to work arg_types to compensate for the attrs stubs.
arg_types = [proper_type]
cls = inner.type
elif isinstance(proper_type, CallableType):
cls = proper_type.type_object()
if cls is not None and MAGIC_ATTR_NAME in cls.names:
# This is a proper attrs class.
ret_type = cls.names[MAGIC_ATTR_NAME].type
assert ret_type is not None
return ctx.default_signature.copy_modified(arg_types=arg_types, ret_type=ret_type)
return ctx.default_signature
|
MethodAdder
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess10.py
|
{
"start": 894,
"end": 1126
}
|
class ____:
@FlagValue
def suppress(self):
return 2
flags = Flags()
def func1(new: Any):
flags.suppress = new
def func2(new: int):
flags.suppress = new
def func3(new: bool):
flags.suppress = new
|
Flags
|
python
|
great-expectations__great_expectations
|
contrib/cli/great_expectations_contrib/package.py
|
{
"start": 676,
"end": 836
}
|
class ____(SerializableDictDot):
concept_only: int
experimental: int
beta: int
production: int
total: int
@dataclass
|
PackageCompletenessStatus
|
python
|
urllib3__urllib3
|
dummyserver/testcase.py
|
{
"start": 1025,
"end": 5497
}
|
class ____:
"""
A simple socket-based server is created for this class that is good for
exactly one request.
"""
scheme = "http"
host = "localhost"
server_thread: typing.ClassVar[SocketServerThread]
port: typing.ClassVar[int]
tmpdir: typing.ClassVar[str]
ca_path: typing.ClassVar[str]
cert_combined_path: typing.ClassVar[str]
cert_path: typing.ClassVar[str]
key_path: typing.ClassVar[str]
password_key_path: typing.ClassVar[str]
server_context: typing.ClassVar[ssl.SSLContext]
client_context: typing.ClassVar[ssl.SSLContext]
proxy_server: typing.ClassVar[SocketDummyServerTestCase]
@classmethod
def _start_server(
cls,
socket_handler: typing.Callable[[socket.socket], None],
quit_event: threading.Event | None = None,
) -> None:
ready_event = threading.Event()
cls.server_thread = SocketServerThread(
socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host,
quit_event=quit_event,
)
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
@classmethod
def start_response_handler(
cls,
response: bytes,
num: int = 1,
block_send: threading.Event | None = None,
) -> threading.Event:
ready_event = threading.Event()
quit_event = threading.Event()
def socket_handler(listener: socket.socket) -> None:
for _ in range(num):
ready_event.set()
listener.settimeout(LONG_TIMEOUT)
while True:
if quit_event.is_set():
return
try:
sock = listener.accept()[0]
break
except (TimeoutError, socket.timeout):
continue
consume_socket(sock, quit_event=quit_event)
if quit_event.is_set():
sock.close()
return
if block_send:
while not block_send.wait(LONG_TIMEOUT):
if quit_event.is_set():
sock.close()
return
block_send.clear()
sock.send(response)
sock.close()
cls._start_server(socket_handler, quit_event=quit_event)
return ready_event
@classmethod
def start_basic_handler(
cls, num: int = 1, block_send: threading.Event | None = None
) -> threading.Event:
return cls.start_response_handler(
b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n",
num,
block_send,
)
@staticmethod
def quit_server_thread(server_thread: SocketServerThread) -> None:
if server_thread.quit_event:
server_thread.quit_event.set()
# in principle the maximum time that the thread can take to notice
# the quit_event is LONG_TIMEOUT and the thread should terminate
# shortly after that, we give 5 seconds leeway just in case
server_thread.join(LONG_TIMEOUT * 2 + 5.0)
if server_thread.is_alive():
raise Exception("server_thread did not exit")
@classmethod
def teardown_class(cls) -> None:
if hasattr(cls, "server_thread"):
cls.quit_server_thread(cls.server_thread)
def teardown_method(self) -> None:
if hasattr(self, "server_thread"):
self.quit_server_thread(self.server_thread)
def assert_header_received(
self,
received_headers: typing.Iterable[bytes],
header_name: str,
expected_value: str | None = None,
) -> None:
header_name_bytes = header_name.encode("ascii")
if expected_value is None:
expected_value_bytes = None
else:
expected_value_bytes = expected_value.encode("ascii")
header_titles = []
for header in received_headers:
key, value = header.split(b": ")
header_titles.append(key)
if key == header_name_bytes and expected_value_bytes is not None:
assert value == expected_value_bytes
assert header_name_bytes in header_titles
|
SocketDummyServerTestCase
|
python
|
sqlalchemy__sqlalchemy
|
examples/inheritance/single.py
|
{
"start": 1211,
"end": 1680
}
|
class ____(Base):
__tablename__ = "person"
__table__: FromClause
id: Mapped[intpk]
company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
name: Mapped[str50]
type: Mapped[str50]
company: Mapped[Company] = relationship(back_populates="employees")
__mapper_args__ = {
"polymorphic_identity": "person",
"polymorphic_on": "type",
}
def __repr__(self):
return f"Ordinary person {self.name}"
|
Person
|
python
|
huggingface__transformers
|
src/transformers/generation/logits_process.py
|
{
"start": 24693,
"end": 27138
}
|
class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that performs top-k, i.e. restricting to the k highest probability elements. Often used
together with [`TemperatureLogitsWarper`] and [`TopPLogitsWarper`].
Args:
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (`float`, *optional*, defaults to -inf):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> set_seed(1)
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
>>> inputs = tokenizer("A sequence: A, B, C, D", return_tensors="pt")
>>> # With sampling, the output is unexpected -- sometimes too unexpected.
>>> outputs = model.generate(**inputs, do_sample=True)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
A sequence: A, B, C, D, E — S — O, P — R
>>> # With `top_k` sampling, the output gets restricted the k most likely tokens.
>>> # Pro tip: In practice, LLMs use `top_k` in the 5-50 range.
>>> outputs = model.generate(**inputs, do_sample=True, top_k=2)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
A sequence: A, B, C, D, E, F, G, H, I
```
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = max(top_k, min_tokens_to_keep)
self.filter_value = filter_value
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
top_k = min(self.top_k, scores.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
scores_processed = scores.masked_fill(indices_to_remove, self.filter_value)
return scores_processed
|
TopKLogitsWarper
|
python
|
kamyu104__LeetCode-Solutions
|
Python/intersection-of-two-arrays.py
|
{
"start": 1826,
"end": 2448
}
|
class ____(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
nums1.sort(), nums2.sort()
res = []
it1, it2 = 0, 0
while it1 < len(nums1) and it2 < len(nums2):
if nums1[it1] < nums2[it2]:
it1 += 1
elif nums1[it1] > nums2[it2]:
it2 += 1
else:
if not res or res[-1] != nums1[it1]:
res += nums1[it1],
it1 += 1
it2 += 1
return res
|
Solution3
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/pod.py
|
{
"start": 4305,
"end": 63330
}
|
class ____(BaseOperator):
"""
Execute a task in a Kubernetes Pod.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:KubernetesPodOperator`
.. note::
If you use `Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine/>`__
and Airflow is not running in the same cluster, consider using
:class:`~airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator`, which
simplifies the authorization process.
:param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
for the Kubernetes cluster. (templated)
:param namespace: the namespace to run within kubernetes.
:param image: Container image you wish to launch. Defaults to hub.docker.com,
but fully qualified URLS will point to custom repositories. (templated)
:param name: name of the pod in which the task will run, will be used (plus a random
suffix if random_name_suffix is True) to generate a pod id (DNS-1123 subdomain,
containing only [a-z0-9.-]). (templated)
:param random_name_suffix: if True, will generate a random suffix.
:param cmds: entrypoint of the container.
The container images's entrypoint is used if this is not provided. (templated)
:param arguments: arguments of the entrypoint.
The container image's CMD is used if this is not provided. (templated)
:param ports: ports for the launched pod.
:param volume_mounts: volumeMounts for the launched pod. (templated)
:param volumes: volumes for the launched pod. Includes ConfigMaps and PersistentVolumes. (templated)
:param env_vars: Environment variables initialized in the container. (templated)
:param env_from: (Optional) List of sources to populate environment variables in the container. (templated)
:param secrets: Kubernetes secrets to inject in the container.
They can be exposed as environment vars or files in a volume.
:param in_cluster: run kubernetes client with in_cluster configuration.
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used. (templated)
:param reattach_on_restart: if the worker dies while the pod is running, reattach and monitor
during the next try. If False, always create a new pod for each try.
:param labels: labels to apply to the Pod. (templated)
:param startup_timeout_seconds: timeout in seconds to startup the pod after pod was scheduled.
:param startup_check_interval_seconds: interval in seconds to check if the pod has already started
:param schedule_timeout_seconds: timeout in seconds to schedule pod in cluster.
:param get_logs: get the stdout of the base container as logs of the tasks.
:param init_container_logs: list of init containers whose logs will be published to stdout
Takes a sequence of containers, a single container name or True. If True,
all the containers logs are published.
:param container_logs: list of containers whose logs will be published to stdout
Takes a sequence of containers, a single container name or True. If True,
all the containers logs are published. Works in conjunction with get_logs param.
The default value is the base container.
:param image_pull_policy: Specify a policy to cache or always pull an image.
:param annotations: non-identifying metadata you can attach to the Pod.
Can be a large range of data, and can include characters
that are not permitted by labels. (templated)
:param container_resources: resources for the launched pod. (templated)
:param affinity: affinity scheduling rules for the launched pod.
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
:param node_selector: A dict containing a group of scheduling rules. (templated)
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a
comma separated list: secret_a,secret_b
:param service_account_name: Name of the service account
:param automount_service_account_token: indicates whether pods running as this service account should have an API token automatically mounted
:param hostnetwork: If True enable host networking on the pod.
:param host_aliases: A list of host aliases to apply to the containers in the pod.
:param tolerations: A list of kubernetes tolerations.
:param security_context: security options the pod should run with (PodSecurityContext).
:param container_security_context: security options the container should run with.
:param dnspolicy: dnspolicy for the pod.
:param dns_config: dns configuration (ip addresses, searches, options) for the pod.
:param hostname: hostname for the pod. (templated)
:param subdomain: subdomain for the pod.
:param schedulername: Specify a schedulername for the pod
:param full_pod_spec: The complete podSpec
:param init_containers: init container for the launched Pod
:param log_events_on_failure: Log the pod's events if a failure occurs
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:param pod_template_file: path to pod template file (templated)
:param pod_template_dict: pod template dictionary (templated)
:param priority_class_name: priority class name for the launched Pod
:param pod_runtime_info_envs: (Optional) A list of environment variables,
to be set in the container.
:param termination_grace_period: Termination grace period (in seconds) for the pod.
This sets the pod's ``terminationGracePeriodSeconds`` and is also used as the grace period
when deleting the pod if the task is killed. If not specified, uses the Kubernetes default (30 seconds).
:param configmaps: (Optional) A list of names of config maps from which it collects ConfigMaps
to populate the environment variables with. The contents of the target
ConfigMap's Data field will represent the key-value pairs as environment variables.
Extends env_from.
:param skip_on_exit_code: If task exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param base_container_name: The name of the base container in the pod. This container's logs
will appear as part of this task's logs if get_logs is True. Defaults to None. If None,
will consult the class variable BASE_CONTAINER_NAME (which defaults to "base") for the base
container name to use. (templated)
:param base_container_status_polling_interval: Polling period in seconds to check for the pod base
container status. Default to 1s.
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: Polling period in seconds to check for the status. Used only in deferrable mode.
:param log_pod_spec_on_failure: Log the pod's specification if a failure occurs
:param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
If "delete_pod", the pod will be deleted regardless its state; if "delete_succeeded_pod",
only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
:param termination_message_policy: The termination message policy of the base container.
Default value is "File"
:param active_deadline_seconds: The active_deadline_seconds which translates to active_deadline_seconds
in V1PodSpec.
:param callbacks: KubernetesPodOperatorCallback instance contains the callbacks methods on different step
of KubernetesPodOperator.
:param logging_interval: max time in seconds that task should be in deferred state before
resuming to fetch the latest logs. If ``None``, then the task will remain in deferred state until pod
is done, and no logs will be visible until that time.
:param trigger_kwargs: additional keyword parameters passed to the trigger
:param container_name_log_prefix_enabled: if True, will prefix container name to each log line.
Default to True.
:param log_formatter: custom log formatter function that takes two string arguments:
the first string is the container_name and the second string is the message_to_log.
The function should return a formatted string. If None, the default formatting will be used.
"""
# !!! Changes in KubernetesPodOperator's arguments should be also reflected in !!!
# - airflow-core/src/airflow/decorators/__init__.pyi (by a separate PR)
# This field can be overloaded at the instance level via base_container_name
BASE_CONTAINER_NAME = "base"
ISTIO_CONTAINER_NAME = "istio-proxy"
KILL_ISTIO_PROXY_SUCCESS_MSG = "HTTP/1.1 200"
POD_CHECKED_KEY = "already_checked"
POST_TERMINATION_TIMEOUT = 120
template_fields: Sequence[str] = (
"image",
"name",
"hostname",
"cmds",
"annotations",
"arguments",
"env_vars",
"labels",
"config_file",
"pod_template_file",
"pod_template_dict",
"namespace",
"container_resources",
"volumes",
"volume_mounts",
"cluster_context",
"env_from",
"node_selector",
"kubernetes_conn_id",
"base_container_name",
"trigger_kwargs",
)
template_fields_renderers = {"env_vars": "py"}
def __init__(
self,
*,
kubernetes_conn_id: str | None = KubernetesHook.default_conn_name,
namespace: str | None = None,
image: str | None = None,
name: str | None = None,
random_name_suffix: bool = True,
cmds: list[str] | None = None,
arguments: list[str] | None = None,
ports: list[k8s.V1ContainerPort] | None = None,
volume_mounts: list[k8s.V1VolumeMount] | None = None,
volumes: list[k8s.V1Volume] | None = None,
env_vars: list[k8s.V1EnvVar] | dict[str, str] | None = None,
env_from: list[k8s.V1EnvFromSource] | None = None,
secrets: list[Secret] | None = None,
in_cluster: bool | None = None,
cluster_context: str | None = None,
labels: dict | None = None,
reattach_on_restart: bool = True,
startup_timeout_seconds: int = 120,
startup_check_interval_seconds: int = 5,
schedule_timeout_seconds: int | None = None,
get_logs: bool = True,
base_container_name: str | None = None,
base_container_status_polling_interval: float = 1,
init_container_logs: Iterable[str] | str | Literal[True] | None = None,
container_logs: Iterable[str] | str | Literal[True] | None = None,
image_pull_policy: str | None = None,
annotations: dict | None = None,
container_resources: k8s.V1ResourceRequirements | None = None,
affinity: k8s.V1Affinity | None = None,
config_file: str | None = None,
node_selector: dict | None = None,
image_pull_secrets: list[k8s.V1LocalObjectReference] | None = None,
service_account_name: str | None = None,
automount_service_account_token: bool | None = None,
hostnetwork: bool = False,
host_aliases: list[k8s.V1HostAlias] | None = None,
tolerations: list[k8s.V1Toleration] | None = None,
security_context: k8s.V1PodSecurityContext | dict | None = None,
container_security_context: k8s.V1SecurityContext | dict | None = None,
dnspolicy: str | None = None,
dns_config: k8s.V1PodDNSConfig | None = None,
hostname: str | None = None,
subdomain: str | None = None,
schedulername: str | None = None,
full_pod_spec: k8s.V1Pod | None = None,
init_containers: list[k8s.V1Container] | None = None,
log_events_on_failure: bool = False,
do_xcom_push: bool = False,
pod_template_file: str | None = None,
pod_template_dict: dict | None = None,
priority_class_name: str | None = None,
pod_runtime_info_envs: list[k8s.V1EnvVar] | None = None,
termination_grace_period: int | None = None,
configmaps: list[str] | None = None,
skip_on_exit_code: int | Container[int] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 2,
log_pod_spec_on_failure: bool = True,
on_finish_action: str = "delete_pod",
is_delete_operator_pod: None | bool = None,
termination_message_policy: str = "File",
active_deadline_seconds: int | None = None,
callbacks: (
list[type[KubernetesPodOperatorCallback]] | type[KubernetesPodOperatorCallback] | None
) = None,
progress_callback: Callable[[str], None] | None = None,
logging_interval: int | None = None,
trigger_kwargs: dict | None = None,
container_name_log_prefix_enabled: bool = True,
log_formatter: Callable[[str, str], str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.kubernetes_conn_id = kubernetes_conn_id
self.do_xcom_push = do_xcom_push
self.image = image
self.namespace = namespace
self.cmds = cmds or []
self.arguments = arguments or []
self.labels = labels or {}
self.startup_timeout_seconds = startup_timeout_seconds
self.startup_check_interval_seconds = startup_check_interval_seconds
# New parameter startup_timeout_seconds adds breaking change, to handle this as smooth as possible just reuse startup time
self.schedule_timeout_seconds = schedule_timeout_seconds or startup_timeout_seconds
env_vars = convert_env_vars(env_vars) if env_vars else []
self.env_vars = env_vars
pod_runtime_info_envs = (
[convert_pod_runtime_info_env(p) for p in pod_runtime_info_envs] if pod_runtime_info_envs else []
)
self.pod_runtime_info_envs = pod_runtime_info_envs
self.env_from = env_from or []
if configmaps:
self.env_from.extend([convert_configmap(c) for c in configmaps])
self.ports = [convert_port(p) for p in ports] if ports else []
volume_mounts = [convert_volume_mount(v) for v in volume_mounts] if volume_mounts else []
self.volume_mounts = volume_mounts
volumes = [convert_volume(volume) for volume in volumes] if volumes else []
self.volumes = volumes
self.secrets = secrets or []
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.reattach_on_restart = reattach_on_restart
self.get_logs = get_logs
# Fallback to the class variable BASE_CONTAINER_NAME here instead of via default argument value
# in the init method signature, to be compatible with subclasses overloading the class variable value.
self.base_container_name = base_container_name or self.BASE_CONTAINER_NAME
self.base_container_status_polling_interval = base_container_status_polling_interval
self.init_container_logs = init_container_logs
self.container_logs = container_logs or self.base_container_name
self.image_pull_policy = image_pull_policy
self.node_selector = node_selector or {}
self.annotations = annotations or {}
self.affinity = convert_affinity(affinity) if affinity else {}
self.container_resources = container_resources
self.config_file = config_file
self.image_pull_secrets = convert_image_pull_secrets(image_pull_secrets) if image_pull_secrets else []
self.service_account_name = service_account_name
self.automount_service_account_token = automount_service_account_token
self.hostnetwork = hostnetwork
self.host_aliases = host_aliases
self.tolerations = (
[convert_toleration(toleration) for toleration in tolerations] if tolerations else []
)
self.security_context = security_context or {}
self.container_security_context = container_security_context
self.dnspolicy = dnspolicy
self.dns_config = dns_config
self.hostname = hostname
self.subdomain = subdomain
self.schedulername = schedulername
self.full_pod_spec = full_pod_spec
self.init_containers = init_containers or []
self.log_events_on_failure = log_events_on_failure
self.priority_class_name = priority_class_name
self.pod_template_file = pod_template_file
self.pod_template_dict = pod_template_dict
self.name = name
self.random_name_suffix = random_name_suffix
self.termination_grace_period = termination_grace_period
self.pod_request_obj: k8s.V1Pod | None = None
self.pod: k8s.V1Pod | None = None
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code is not None
else []
)
self.deferrable = deferrable
self.poll_interval = poll_interval
self.remote_pod: k8s.V1Pod | None = None
self.log_pod_spec_on_failure = log_pod_spec_on_failure
self.on_finish_action = OnFinishAction(on_finish_action)
# The `is_delete_operator_pod` parameter should have been removed in provider version 10.0.0.
# TODO: remove it from here and from the operator's parameters list when the next major version bumped
self._is_delete_operator_pod = self.on_finish_action == OnFinishAction.DELETE_POD
self.termination_message_policy = termination_message_policy
self.active_deadline_seconds = active_deadline_seconds
self.logging_interval = logging_interval
self.trigger_kwargs = trigger_kwargs
self._config_dict: dict | None = None # TODO: remove it when removing convert_config_file_to_dict
self._progress_callback = progress_callback
self.callbacks = [] if not callbacks else callbacks if isinstance(callbacks, list) else [callbacks]
self._killed: bool = False
self.container_name_log_prefix_enabled = container_name_log_prefix_enabled
self.log_formatter = log_formatter
@cached_property
def _incluster_namespace(self):
from pathlib import Path
path = Path("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
return path.exists() and path.read_text() or None
def _render_nested_template_fields(
self,
content: Any,
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set,
) -> None:
if id(content) not in seen_oids:
template_fields: tuple | None
if isinstance(content, k8s.V1EnvVar):
template_fields = ("value", "name")
elif isinstance(content, k8s.V1ResourceRequirements):
template_fields = ("limits", "requests")
elif isinstance(content, k8s.V1Volume):
template_fields = ("name", "persistent_volume_claim", "config_map")
elif isinstance(content, k8s.V1VolumeMount):
template_fields = ("name", "sub_path")
elif isinstance(content, k8s.V1PersistentVolumeClaimVolumeSource):
template_fields = ("claim_name",)
elif isinstance(content, k8s.V1ConfigMapVolumeSource):
template_fields = ("name",)
elif isinstance(content, k8s.V1EnvFromSource):
template_fields = ("config_map_ref",)
elif isinstance(content, k8s.V1ConfigMapEnvSource):
template_fields = ("name",)
else:
template_fields = None
if template_fields:
seen_oids.add(id(content))
self._do_render_template_fields(content, template_fields, context, jinja_env, seen_oids)
return
super()._render_nested_template_fields(content, context, jinja_env, seen_oids)
@staticmethod
def _get_ti_pod_labels(context: Context | None = None, include_try_number: bool = True) -> dict[str, str]:
    """
    Generate labels for the pod to track the pod in case of Operator crash.

    The labels (dag_id, task_id, run_id, and optionally map_index and
    try_number) identify the task instance, which lets a restarted worker
    locate and re-attach to an already-running pod.

    :param context: task context provided by airflow DAG.
    :param include_try_number: if set to True will add the try number
        from the task context to the pod labels.
    :return: dict
    """
    if not context:
        return {}
    ti = context["ti"]
    run_id = context["run_id"]
    labels = {
        "dag_id": ti.dag_id,
        "task_id": ti.task_id,
        "run_id": run_id,
        "kubernetes_pod_operator": "True",
    }
    # Mapped tasks need the map_index to distinguish parallel instances.
    map_index = ti.map_index
    if map_index is not None and map_index >= 0:
        labels["map_index"] = str(map_index)
    if include_try_number:
        labels.update(try_number=str(ti.try_number))
    # Only relevant when the DAG has a parent (sub-DAG), pre-Airflow-3.0.
    # TODO: Remove this when the minimum version of Airflow is bumped to 3.0
    if getattr(context["dag"], "parent_dag", False):
        labels["parent_dag_id"] = context["dag"].parent_dag.dag_id  # type: ignore[attr-defined]
    # Ensure that label is valid for Kube,
    # and if not truncate/remove invalid chars and replace with short hash.
    for label_id, label in labels.items():
        safe_label = pod_generator.make_safe_label_value(str(label))
        labels[label_id] = safe_label
    return labels
@cached_property
def pod_manager(self) -> PodManager:
    # Built lazily so the Kubernetes client is only created when first needed.
    return PodManager(kube_client=self.client, callbacks=self.callbacks)
@cached_property
def hook(self) -> PodOperatorHookProtocol:
    """Kubernetes hook constructed from this operator's connection settings."""
    hook = KubernetesHook(
        conn_id=self.kubernetes_conn_id,
        in_cluster=self.in_cluster,
        config_file=self.config_file,
        cluster_context=self.cluster_context,
    )
    return hook
@cached_property
def client(self) -> CoreV1Api:
    """Core V1 API client; notifies registered callbacks when it is created."""
    client = self.hook.core_v1_client
    for callback in self.callbacks:
        callback.on_sync_client_creation(client=client, operator=self)
    return client
@generic_api_retry
def find_pod(self, namespace: str, context: Context, *, exclude_checked: bool = True) -> k8s.V1Pod | None:
"""Return an already-running pod for this task instance if one exists."""
label_selector = self._build_find_pod_label_selector(context, exclude_checked=exclude_checked)
pod_list = self.client.list_namespaced_pod(
namespace=namespace,
label_selector=label_selector,
).items
pod = None
num_pods = len(pod_list)
if num_pods == 1:
pod = pod_list[0]
self.log_matching_pod(pod=pod, context=context)
elif num_pods > 1:
if self.reattach_on_restart:
raise FoundMoreThanOnePodFailure(f"More than one pod running with labels {label_selector}")
self.log.warning("Found more than one pod running with labels %s, resolving ...", label_selector)
pod = self.process_duplicate_label_pods(pod_list)
self.log_matching_pod(pod=pod, context=context)
return pod
def log_matching_pod(self, pod: k8s.V1Pod, context: Context) -> None:
    """Log details of a pod found by ``find_pod`` to aid re-attachment debugging."""
    self.log.info("Found matching pod %s with labels %s", pod.metadata.name, pod.metadata.labels)
    self.log.info("`try_number` of task_instance: %s", context["ti"].try_number)
    self.log.info("`try_number` of pod: %s", pod.metadata.labels["try_number"])
def get_or_create_pod(self, pod_request_obj: k8s.V1Pod, context: Context) -> k8s.V1Pod:
if self.reattach_on_restart:
pod = self.find_pod(pod_request_obj.metadata.namespace, context=context)
if pod:
# If pod is terminated then delete the pod an create a new as not possible to get xcom
pod_phase = pod.status.phase if pod.status and pod.status.phase else None
pod_reason = pod.status.reason.lower() if pod.status and pod.status.reason else ""
if pod_phase not in (PodPhase.SUCCEEDED, PodPhase.FAILED) and pod_reason != "evicted":
self.log.info(
"Reusing existing pod '%s' (phase=%s, reason=%s) since it is not terminated or evicted.",
pod.metadata.name,
pod_phase,
pod_reason,
)
return pod
self.log.info(
"Found terminated old matching pod %s with labels %s",
pod.metadata.name,
pod.metadata.labels,
)
# if not required to delete the pod then keep old logic and not automatically create new pod
deleted_pod = self.process_pod_deletion(pod)
if not deleted_pod:
return pod
self.log.info("Deleted pod to handle rerun and create new pod!")
self.log.debug("Starting pod:\n%s", yaml.safe_dump(pod_request_obj.to_dict()))
self.pod_manager.create_pod(pod=pod_request_obj)
return pod_request_obj
def await_pod_start(self, pod: k8s.V1Pod) -> None:
try:
async def _await_pod_start():
events_task = self.pod_manager.watch_pod_events(pod, self.startup_check_interval_seconds)
pod_start_task = self.pod_manager.await_pod_start(
pod=pod,
schedule_timeout=self.schedule_timeout_seconds,
startup_timeout=self.startup_timeout_seconds,
check_interval=self.startup_check_interval_seconds,
)
await asyncio.gather(pod_start_task, events_task)
asyncio.run(_await_pod_start())
except PodLaunchFailedException:
if self.log_events_on_failure:
self._read_pod_container_states(pod, reraise=False)
self._read_pod_events(pod, reraise=False)
raise
def extract_xcom(self, pod: k8s.V1Pod) -> dict[Any, Any] | None:
    """Retrieve xcom value and kill xcom sidecar container.

    Returns None when the sidecar's result file is empty, otherwise the
    JSON-decoded content.
    """
    result = self.pod_manager.extract_xcom(pod)
    # The sidecar writes a sentinel string when there is nothing to push.
    if isinstance(result, str) and result.rstrip() == EMPTY_XCOM_RESULT:
        self.log.info("xcom result file is empty.")
        return None
    self.log.debug("xcom result: \n%s", result)
    return json.loads(result)
def execute(self, context: Context):
    """Based on the deferrable parameter runs the pod asynchronously or synchronously."""
    self.name = self._set_name(self.name)
    if self.deferrable:
        self.execute_async(context)
        return None
    return self.execute_sync(context)
def execute_sync(self, context: Context):
result = None
try:
if self.pod_request_obj is None:
self.pod_request_obj = self.build_pod_request_obj(context)
for callback in self.callbacks:
callback.on_pod_manifest_created(
pod_request=self.pod_request_obj,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
if self.pod is None:
self.pod = self.get_or_create_pod( # must set `self.pod` for `on_kill`
pod_request_obj=self.pod_request_obj,
context=context,
)
# push to xcom now so that if there is an error we still have the values
ti = context["ti"]
ti.xcom_push(key="pod_name", value=self.pod.metadata.name)
ti.xcom_push(key="pod_namespace", value=self.pod.metadata.namespace)
# get remote pod for use in cleanup methods
self.remote_pod = self.find_pod(self.pod.metadata.namespace, context=context)
for callback in self.callbacks:
callback.on_pod_creation(
pod=self.remote_pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
self.await_init_containers_completion(pod=self.pod)
self.await_pod_start(pod=self.pod)
if self.callbacks:
pod = self.find_pod(self.pod.metadata.namespace, context=context)
for callback in self.callbacks:
callback.on_pod_starting(
pod=pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
self.await_pod_completion(pod=self.pod)
if self.callbacks:
pod = self.find_pod(self.pod.metadata.namespace, context=context)
for callback in self.callbacks:
callback.on_pod_completion(
pod=pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
for callback in self.callbacks:
callback.on_pod_teardown(
pod=pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
if self.do_xcom_push:
self.pod_manager.await_xcom_sidecar_container_start(pod=self.pod)
result = self.extract_xcom(pod=self.pod)
istio_enabled = self.is_istio_enabled(self.pod)
self.remote_pod = self.pod_manager.await_pod_completion(
self.pod, istio_enabled, self.base_container_name
)
finally:
pod_to_clean = self.pod or self.pod_request_obj
self.post_complete_action(
pod=pod_to_clean, remote_pod=self.remote_pod, context=context, result=result
)
if self.do_xcom_push:
return result
@tenacity.retry(
    wait=tenacity.wait_exponential(max=15),
    retry=tenacity.retry_if_exception_type(PodCredentialsExpiredFailure),
    reraise=True,
)
def await_init_containers_completion(self, pod: k8s.V1Pod):
    """Stream the requested init containers' logs until they complete.

    Retried with exponential backoff when Kubernetes credentials expire
    mid-stream (translated into ``PodCredentialsExpiredFailure`` by
    ``_handle_api_exception``).
    """
    try:
        if self.init_container_logs:
            self.pod_manager.fetch_requested_init_container_logs(
                pod=pod,
                init_containers=self.init_container_logs,
                follow_logs=True,
                container_name_log_prefix_enabled=self.container_name_log_prefix_enabled,
                log_formatter=self.log_formatter,
            )
    except kubernetes.client.exceptions.ApiException as exc:
        self._handle_api_exception(exc, pod)
@tenacity.retry(
wait=tenacity.wait_exponential(max=15),
retry=tenacity.retry_if_exception_type(PodCredentialsExpiredFailure),
reraise=True,
)
def await_pod_completion(self, pod: k8s.V1Pod):
try:
if self.get_logs:
self.pod_manager.fetch_requested_container_logs(
pod=pod,
containers=self.container_logs,
follow_logs=True,
container_name_log_prefix_enabled=self.container_name_log_prefix_enabled,
log_formatter=self.log_formatter,
)
if not self.get_logs or (
self.container_logs is not True and self.base_container_name not in self.container_logs
):
self.pod_manager.await_container_completion(
pod=pod,
container_name=self.base_container_name,
polling_time=self.base_container_status_polling_interval,
)
except kubernetes.client.exceptions.ApiException as exc:
self._handle_api_exception(exc, pod)
def _handle_api_exception(
    self,
    exc: kubernetes.client.exceptions.ApiException,
    pod: k8s.V1Pod,
):
    """Translate a 401 from the API server into a retryable credentials failure.

    Any other ApiException is re-raised unchanged.
    """
    if exc.status and str(exc.status) == "401":
        self.log.warning(
            "Failed to check container status due to permission error. Refreshing credentials and retrying."
        )
        self._refresh_cached_properties()
        self.pod_manager.read_pod(pod=pod)  # attempt using refreshed credentials, raises if still invalid
        raise PodCredentialsExpiredFailure("Kubernetes credentials expired, retrying after refresh.")
    raise exc
def _refresh_cached_properties(self):
    # Deleting the cached_property values forces them to be rebuilt (with
    # fresh credentials) on next access; used after a 401 from the API server.
    del self.hook
    del self.client
    del self.pod_manager
def execute_async(self, context: Context) -> None:
if self.pod_request_obj is None:
self.pod_request_obj = self.build_pod_request_obj(context)
for callback in self.callbacks:
callback.on_pod_manifest_created(
pod_request=self.pod_request_obj,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
if self.pod is None:
self.pod = self.get_or_create_pod( # must set `self.pod` for `on_kill`
pod_request_obj=self.pod_request_obj,
context=context,
)
if self.callbacks:
pod = self.find_pod(self.pod.metadata.namespace, context=context)
for callback in self.callbacks:
callback.on_pod_creation(
pod=pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
ti = context["ti"]
ti.xcom_push(key="pod_name", value=self.pod.metadata.name)
ti.xcom_push(key="pod_namespace", value=self.pod.metadata.namespace)
self.invoke_defer_method()
def convert_config_file_to_dict(self):
    """Convert passed config_file to dict representation."""
    # Fall back to the path from the kube-config environment variable when
    # no explicit config file was given to the operator.
    path = self.config_file or os.environ.get(KUBE_CONFIG_ENV_VAR)
    if not path:
        self._config_dict = None
        return
    with open(path) as stream:
        self._config_dict = yaml.safe_load(stream)
def invoke_defer_method(self, last_log_time: DateTime | None = None) -> None:
"""Redefine triggers which are being used in child classes."""
self.convert_config_file_to_dict()
trigger_start_time = datetime.datetime.now(tz=datetime.timezone.utc)
self.defer(
trigger=KubernetesPodTrigger(
pod_name=self.pod.metadata.name, # type: ignore[union-attr]
pod_namespace=self.pod.metadata.namespace, # type: ignore[union-attr]
trigger_start_time=trigger_start_time,
kubernetes_conn_id=self.kubernetes_conn_id,
cluster_context=self.cluster_context,
config_dict=self._config_dict,
in_cluster=self.in_cluster,
poll_interval=self.poll_interval,
get_logs=self.get_logs,
startup_timeout=self.startup_timeout_seconds,
startup_check_interval=self.startup_check_interval_seconds,
schedule_timeout=self.schedule_timeout_seconds,
base_container_name=self.base_container_name,
on_finish_action=self.on_finish_action.value,
last_log_time=last_log_time,
logging_interval=self.logging_interval,
trigger_kwargs=self.trigger_kwargs,
),
method_name="trigger_reentry",
)
def trigger_reentry(self, context: Context, event: dict[str, Any]) -> Any:
"""
Point of re-entry from trigger.
If ``logging_interval`` is None, then at this point, the pod should be done, and we'll just fetch
the logs and exit.
If ``logging_interval`` is not None, it could be that the pod is still running, and we'll just
grab the latest logs and defer back to the trigger again.
"""
self.pod = None
xcom_sidecar_output = None
try:
pod_name = event["name"]
pod_namespace = event["namespace"]
self.pod = self.hook.get_pod(pod_name, pod_namespace)
if not self.pod:
raise PodNotFoundException("Could not find pod after resuming from deferral")
follow = self.logging_interval is None
last_log_time = event.get("last_log_time")
if event["status"] in ("error", "failed", "timeout", "success"):
if self.get_logs:
self._write_logs(self.pod, follow=follow, since_time=last_log_time)
for callback in self.callbacks:
callback.on_pod_completion(
pod=self.pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
for callback in self.callbacks:
callback.on_pod_teardown(
pod=self.pod,
client=self.client,
mode=ExecutionMode.SYNC,
context=context,
operator=self,
)
xcom_sidecar_output = self.extract_xcom(pod=self.pod) if self.do_xcom_push else None
if event["status"] != "success":
self.log.error(
"Trigger emitted an %s event, failing the task: %s", event["status"], event["message"]
)
message = event.get("stack_trace", event["message"])
raise AirflowException(message)
except TaskDeferred:
raise
finally:
self._clean(event=event, context=context, result=xcom_sidecar_output)
if self.do_xcom_push:
return xcom_sidecar_output
def _clean(self, event: dict[str, Any], result: dict | None, context: Context) -> None:
if self.pod is None:
return
istio_enabled = self.is_istio_enabled(self.pod)
# Skip await_pod_completion when the event is 'timeout' due to the pod can hang
# on the ErrImagePull or ContainerCreating step and it will never complete
if event["status"] != "timeout":
try:
self.pod = self.pod_manager.await_pod_completion(
self.pod, istio_enabled, self.base_container_name
)
except ApiException as e:
if e.status == 404:
self.pod = None
self.log.warning(
"Pod not found while waiting for completion. The last status was %r", event["status"]
)
else:
raise e
if self.pod is not None:
self.post_complete_action(
pod=self.pod,
remote_pod=self.pod,
context=context,
result=result,
)
def _write_logs(self, pod: k8s.V1Pod, follow: bool = False, since_time: DateTime | None = None) -> None:
    """Stream the base container's logs from *pod* into the task log.

    :param pod: pod whose base-container logs should be emitted.
    :param follow: whether to tail the log stream until the container exits.
    :param since_time: only fetch log lines newer than this timestamp, if given.
    """
    try:
        if since_time:
            elapsed = datetime.datetime.now(tz=datetime.timezone.utc) - since_time
            # The API takes whole seconds; round up so no line is skipped.
            since_seconds = math.ceil(elapsed.total_seconds())
        else:
            since_seconds = None
        log_stream = self.client.read_namespaced_pod_log(
            name=pod.metadata.name,
            namespace=pod.metadata.namespace,
            container=self.base_container_name,
            follow=follow,
            timestamps=False,
            since_seconds=since_seconds,
            _preload_content=False,
        )
        for raw_line in log_stream:
            text = raw_line.decode("utf-8", errors="backslashreplace").rstrip("\n")
            if text:
                self.log.info("[%s] logs: %s", self.base_container_name, text)
    except (HTTPError, ApiException) as e:
        # Log fetching is best-effort; a transient API/network error must not fail the task.
        self.log.warning(
            "Reading of logs interrupted with error %r; will retry. "
            "Set log level to DEBUG for traceback.",
            e if not isinstance(e, ApiException) else e.reason,
        )
def post_complete_action(
    self, *, pod: k8s.V1Pod, remote_pod: k8s.V1Pod, context: Context, result: dict | None, **kwargs
) -> None:
    """Actions that must be done after operator finishes logic of the deferrable_execution."""
    self.cleanup(pod=pod, remote_pod=remote_pod, xcom_result=result, context=context)
    # Let every registered callback run its own post-cleanup hook.
    for cb in self.callbacks:
        cb.on_pod_cleanup(
            pod=pod, client=self.client, mode=ExecutionMode.SYNC, operator=self, context=context
        )
def cleanup(
    self,
    pod: k8s.V1Pod,
    remote_pod: k8s.V1Pod,
    xcom_result: dict | None = None,
    context: Context | None = None,
) -> None:
    """Evaluate the pod's terminal state and patch/delete it per the configured on_finish_action.

    On failure, pushes any sidecar XCom result, optionally logs container
    states and events, and raises. May also raise AirflowSkipException when
    the base container's exit code matches ``skip_on_exit_code``.

    :param pod: the pod request object (used for names in messages/log reads).
    :param remote_pod: the pod as last observed on the cluster (drives the decisions).
    :param xcom_result: sidecar output to push even if the pod failed.
    :param context: task context, required to push the XCom on failure.
    :raises AirflowSkipException: when the base container exit code is in ``skip_on_exit_code``.
    :raises AirflowException: when the pod (or its base container under istio) did not succeed.
    """
    # Skip cleaning the pod in the following scenarios.
    # 1. If a task got marked as failed, "on_kill" method would be called and the pod will be cleaned up
    # there. Cleaning it up again will raise an exception (which might cause retry).
    # 2. remote pod is null (ex: pod creation failed)
    if self._killed or not remote_pod:
        return
    istio_enabled = self.is_istio_enabled(remote_pod)
    pod_phase = remote_pod.status.phase if hasattr(remote_pod, "status") else None
    # if the pod fails or success, but we don't want to delete it
    if pod_phase != PodPhase.SUCCEEDED or self.on_finish_action == OnFinishAction.KEEP_POD:
        # Label the pod so a retried task does not reattach to it.
        self.patch_already_checked(remote_pod, reraise=False)
    # With istio, the pod phase alone is unreliable (the sidecar keeps it Running),
    # so judge success by the base container instead.
    failed = (pod_phase != PodPhase.SUCCEEDED and not istio_enabled) or (
        istio_enabled and not container_is_succeeded(remote_pod, self.base_container_name)
    )
    if failed:
        if self.do_xcom_push and xcom_result and context:
            # Ensure that existing XCom is pushed even in case of failure
            context["ti"].xcom_push(XCOM_RETURN_KEY, xcom_result)
        if self.log_events_on_failure:
            self._read_pod_container_states(pod, reraise=False)
            self._read_pod_events(pod, reraise=False)
    # Deletion (when configured) happens before raising so failed pods don't leak.
    self.process_pod_deletion(remote_pod, reraise=False)
    if self.skip_on_exit_code:
        container_statuses = (
            remote_pod.status.container_statuses if remote_pod and remote_pod.status else None
        ) or []
        base_container_status = next(
            (x for x in container_statuses if x.name == self.base_container_name), None
        )
        exit_code = (
            base_container_status.state.terminated.exit_code
            if base_container_status
            and base_container_status.state
            and base_container_status.state.terminated
            else None
        )
        if exit_code in self.skip_on_exit_code:
            raise AirflowSkipException(
                f"Pod {pod and pod.metadata.name} returned exit code {exit_code}. Skipping."
            )
    if failed:
        error_message = get_container_termination_message(remote_pod, self.base_container_name)
        raise AirflowException(
            "\n".join(
                filter(
                    None,
                    [
                        f"Pod {pod and pod.metadata.name} returned a failure.",
                        error_message if isinstance(error_message, str) else None,
                        f"remote_pod: {remote_pod}" if self.log_pod_spec_on_failure else None,
                    ],
                )
            )
        )
def _read_pod_events(self, pod, *, reraise=True) -> None:
    """Will fetch and emit events from pod."""
    with _optionally_suppress(reraise=reraise):
        for pod_event in self.pod_manager.read_pod_events(pod).items:
            # events.k8s.io/v1 at this stage will always be Normal unless flagged Warning
            emit = (
                self.log.warning
                if pod_event.type == PodEventType.WARNING.value
                else self.log.info
            )
            emit("Pod Event: %s - %s", pod_event.reason, pod_event.message)
def _read_pod_container_states(self, pod, *, reraise=True) -> None:
    """Log detailed container states of pod for debugging."""
    with _optionally_suppress(reraise=reraise):
        snapshot = self.pod_manager.read_pod(pod)
        self.log.info(
            "Pod phase: %s, reason: %s",
            getattr(snapshot.status, "phase", None),
            getattr(snapshot.status, "reason", None),
        )
        for status in getattr(snapshot.status, "container_statuses", None) or []:
            state = status.state
            if state.terminated:
                # Non-zero exit is an error; a clean exit is informational.
                emit = self.log.info if state.terminated.exit_code == 0 else self.log.error
                emit(
                    "Container '%s': state='TERMINATED', reason='%s', exit_code=%s, message='%s'",
                    status.name,
                    state.terminated.reason,
                    state.terminated.exit_code,
                    state.terminated.message,
                )
            elif state.waiting:
                self.log.warning(
                    "Container '%s': state='WAITING', reason='%s', message='%s'",
                    status.name,
                    state.waiting.reason,
                    state.waiting.message,
                )
            elif state.running:
                self.log.info(
                    "Container '%s': state='RUNNING', started_at=%s",
                    status.name,
                    state.running.started_at,
                )
def is_istio_enabled(self, pod: V1Pod) -> bool:
    """Return True if the pod (as read from the cluster) carries an istio-proxy sidecar container."""
    if not pod:
        return False
    containers = self.pod_manager.read_pod(pod).spec.containers
    return any(container.name == self.ISTIO_CONTAINER_NAME for container in containers)
def kill_istio_sidecar(self, pod: V1Pod) -> None:
    """Ask the istio-proxy sidecar in *pod* to shut down via its /quitquitquit endpoint.

    Executes curl inside the sidecar container and verifies the response
    contains the expected success marker.

    :param pod: the pod whose istio-proxy sidecar should be terminated.
    :raises AirflowException: if the curl output does not confirm shutdown.
    """
    command = "/bin/sh -c 'curl -fsI -X POST http://localhost:15020/quitquitquit'"
    command_to_container = shlex.split(command)
    resp = stream(
        self.client.connect_get_namespaced_pod_exec,
        name=pod.metadata.name,
        namespace=pod.metadata.namespace,
        container=self.ISTIO_CONTAINER_NAME,
        command=command_to_container,
        stderr=True,
        stdin=True,
        stdout=True,
        tty=False,
        _preload_content=False,
    )
    output = []
    try:
        # BUG FIX: the stream was previously closed inside the poll loop, so at
        # most one chunk of stdout was captured (and close() ran twice). Drain
        # available stdout, then close exactly once.
        while resp.is_open():
            if resp.peek_stdout():
                output.append(resp.read_stdout())
            else:
                break
    finally:
        resp.close()
    output_str = "".join(output)
    self.log.info("Output of curl command to kill istio: %s", output_str)
    if self.KILL_ISTIO_PROXY_SUCCESS_MSG not in output_str:
        # BUG FIX: was AirflowException("...: %s", output_str) — %-style args are
        # never interpolated by the exception, so the output was lost from the message.
        raise AirflowException(f"Error while deleting istio-proxy sidecar: {output_str}")
def process_pod_deletion(self, pod: k8s.V1Pod, *, reraise=True) -> bool:
    """Delete *pod* if the configured ``on_finish_action`` calls for it.

    :param pod: the pod to (maybe) delete; None is a no-op.
    :param reraise: whether API errors should propagate or be suppressed.
    :return: True when a deletion was issued, False otherwise.
    """
    with _optionally_suppress(reraise=reraise):
        if pod is not None:
            wants_delete = self.on_finish_action == OnFinishAction.DELETE_POD or (
                self.on_finish_action == OnFinishAction.DELETE_SUCCEEDED_POD
                and (
                    pod.status.phase == PodPhase.SUCCEEDED
                    or container_is_succeeded(pod, self.base_container_name)
                )
            )
            if wants_delete:
                self.log.info("Deleting pod: %s", pod.metadata.name)
                self.pod_manager.delete_pod(pod)
                return True
            self.log.info("Skipping deleting pod: %s", pod.metadata.name)
    return False
def _build_find_pod_label_selector(self, context: Context | None = None, *, exclude_checked=True) -> str:
    """Compose the comma-separated label selector used to find an existing pod for this task instance.

    Excludes pods already marked as checked (unless *exclude_checked* is False)
    and always excludes airflow-worker pods.
    """
    merged = _normalize_labels_dict(
        {**self.labels, **self._get_ti_pod_labels(context, include_try_number=False)}
    )
    selector = ",".join(f"{key}={value}" for key, value in sorted(merged.items()))
    if exclude_checked:
        selector = f"{selector},{self.POD_CHECKED_KEY}!=True"
    return f"{selector},!airflow-worker"
@staticmethod
def _set_name(name: str | None) -> str | None:
    """Validate *name* as a task key and normalize it to lowercase-dash form; None passes through."""
    if name is None:
        return None
    validate_key(name, max_length=220)
    # Collapse any run of disallowed characters into a single dash.
    return re.sub(r"[^a-z0-9-]+", "-", name.lower())
def patch_already_checked(self, pod: k8s.V1Pod, *, reraise=True):
    """Add an "already checked" label to ensure we don't reattach on retries."""
    with _optionally_suppress(reraise=reraise):
        patch_body = {"metadata": {"labels": {self.POD_CHECKED_KEY: "True"}}}

        @generic_api_retry
        def _apply_patch():
            self.client.patch_namespaced_pod(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                body=patch_body,
            )

        _apply_patch()
def on_kill(self) -> None:
    """Mark the task as externally killed and best-effort delete the running pod."""
    self._killed = True
    if not self.pod:
        return
    delete_kwargs = {
        "name": self.pod.metadata.name,
        "namespace": self.pod.metadata.namespace,
    }
    if self.termination_grace_period is not None:
        delete_kwargs["grace_period_seconds"] = self.termination_grace_period

    @generic_api_retry
    def _delete_with_retry():
        self.client.delete_namespaced_pod(**delete_kwargs)

    try:
        _delete_with_retry()
    except kubernetes.client.exceptions.ApiException:
        # on_kill must never raise; log and move on.
        self.log.exception("Unable to delete pod %s", self.pod.metadata.name)
def build_pod_request_obj(self, context: Context | None = None) -> k8s.V1Pod:
    """
    Return V1Pod object based on pod template file, full pod spec, and other operator parameters.

    The V1Pod attributes are derived (in order of precedence) from operator params, full pod spec, pod
    template file.

    :param context: task context used to derive task-instance identifying labels.
    :return: the fully reconciled pod request object (after pod_mutation_hook).
    """
    self.log.debug("Creating pod for KubernetesPodOperator task %s", self.task_id)
    self.env_vars = convert_env_vars_or_raise_error(self.env_vars) if self.env_vars else []
    if self.pod_runtime_info_envs:
        self.env_vars.extend(self.pod_runtime_info_envs)
    # Establish the base pod: template file, then template dict, then full spec, else empty.
    if self.pod_template_file:
        self.log.debug("Pod template file found, will parse for base pod")
        pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
        if self.full_pod_spec:
            # full_pod_spec wins over template contents field-by-field.
            pod_template = PodGenerator.reconcile_pods(pod_template, self.full_pod_spec)
    elif self.pod_template_dict:
        self.log.debug("Pod template dict found, will parse for base pod")
        pod_template = pod_generator.PodGenerator.deserialize_model_dict(self.pod_template_dict)
        if self.full_pod_spec:
            pod_template = PodGenerator.reconcile_pods(pod_template, self.full_pod_spec)
    elif self.full_pod_spec:
        pod_template = self.full_pod_spec
    else:
        pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta())
    # Pod built from direct operator parameters; reconciled over the template below.
    pod = k8s.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=k8s.V1ObjectMeta(
            namespace=self.namespace,
            labels=_normalize_labels_dict(self.labels),
            name=self.name,
            annotations=self.annotations,
        ),
        spec=k8s.V1PodSpec(
            node_selector=self.node_selector,
            affinity=self.affinity,
            tolerations=self.tolerations,
            init_containers=self.init_containers,
            host_aliases=self.host_aliases,
            containers=[
                k8s.V1Container(
                    image=self.image,
                    name=self.base_container_name,
                    command=self.cmds,
                    ports=self.ports,
                    image_pull_policy=self.image_pull_policy,
                    resources=self.container_resources,
                    volume_mounts=self.volume_mounts,
                    args=self.arguments,
                    env=self.env_vars,
                    env_from=self.env_from,
                    security_context=self.container_security_context,
                    termination_message_policy=self.termination_message_policy,
                )
            ],
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            automount_service_account_token=self.automount_service_account_token,
            host_network=self.hostnetwork,
            hostname=self.hostname,
            subdomain=self.subdomain,
            security_context=self.security_context,
            dns_policy=self.dnspolicy,
            dns_config=self.dns_config,
            scheduler_name=self.schedulername,
            restart_policy="Never",
            priority_class_name=self.priority_class_name,
            volumes=self.volumes,
            active_deadline_seconds=self.active_deadline_seconds,
            termination_grace_period_seconds=self.termination_grace_period,
        ),
    )
    pod = PodGenerator.reconcile_pods(pod_template, pod)
    if not pod.metadata.name:
        pod.metadata.name = create_unique_id(
            task_id=self.task_id, unique=self.random_name_suffix, max_length=POD_NAME_MAX_LENGTH
        )
    elif self.random_name_suffix:
        # user has supplied pod name, we're just adding suffix
        pod.metadata.name = add_unique_suffix(name=pod.metadata.name)
    if not pod.metadata.namespace:
        # Namespace fallback chain: operator param, hook, in-cluster namespace, "default".
        hook_namespace = self.hook.get_namespace()
        pod_namespace = self.namespace or hook_namespace or self._incluster_namespace or "default"
        pod.metadata.namespace = pod_namespace
    for secret in self.secrets:
        self.log.debug("Adding secret to task %s", self.task_id)
        pod = secret.attach_to_pod(pod)
    if self.do_xcom_push:
        self.log.debug("Adding xcom sidecar to task %s", self.task_id)
        pod = xcom_sidecar.add_xcom_sidecar(
            pod,
            sidecar_container_image=self.hook.get_xcom_sidecar_container_image(),
            sidecar_container_resources=self.hook.get_xcom_sidecar_container_resources(),
        )
    labels = self._get_ti_pod_labels(context)
    self.log.info("Building pod %s with labels: %s", pod.metadata.name, labels)
    # Merge Pod Identifying labels with labels passed to operator
    pod.metadata.labels.update(labels)
    # Add Airflow Version to the label
    # And a label to identify that pod is launched by KubernetesPodOperator
    pod.metadata.labels.update(
        {
            "airflow_version": airflow_version.replace("+", "-"),
            "airflow_kpo_in_cluster": str(self.hook.is_in_cluster),
        }
    )
    # Cluster-policy hook gets the last word on the pod spec.
    pod_mutation_hook(pod)
    return pod
def dry_run(self) -> None:
    """
    Print out the pod definition that would be created by this operator.

    Does not include labels specific to the task instance (since there isn't
    one in a dry_run) and excludes all empty elements.
    """
    rendered = self.build_pod_request_obj().to_dict()
    print(yaml.dump(prune_dict(rendered, mode="strict")))
def process_duplicate_label_pods(self, pod_list: list[k8s.V1Pod]) -> k8s.V1Pod:
    """
    Patch or delete existing pods with duplicate labels.

    This is to handle an edge case that can happen only if reattach_on_restart
    flag is False, and the previous run attempt has failed because the task
    process has been killed externally by the cluster or another process.

    If the task process is killed externally, it breaks the code execution and
    immediately exists the task. As a result the pod created in the previous attempt
    will not be properly deleted or patched by cleanup() method.

    Return the newly created pod to be used for the next run attempt.

    :param pod_list: all pods matching this task instance's labels (most recent one included).
    :return: the most recently started pod (the current attempt's pod).
    """
    new_pod = pod_list.pop(self._get_most_recent_pod_index(pod_list))
    # BUG FIX: previously only pod_list[0] was patched/deleted, leaking any further
    # stale duplicates (and raising IndexError if there were none). Handle them all.
    for old_pod in pod_list:
        self.patch_already_checked(old_pod, reraise=False)
        if self.on_finish_action == OnFinishAction.DELETE_POD:
            self.process_pod_deletion(old_pod)
    return new_pod
@staticmethod
def _get_most_recent_pod_index(pod_list: list[k8s.V1Pod]) -> int:
    """Loop through a list of V1Pod objects and get the index of the most recent one."""
    start_times = [pod.to_dict().get("status").get("start_time") for pod in pod_list]
    latest = max(start_times)
    return start_times.index(latest)
|
KubernetesPodOperator
|
python
|
gevent__gevent
|
src/greentest/3.10/test_asyncore.py
|
{
"start": 25702,
"end": 25927
}
|
class ____(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = os_helper.TESTFN
def tearDown(self):
os_helper.unlink(self.addr)
BaseTestAPI.tearDown(self)
|
TestAPI_UseUnixSockets
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/lib/foundry.py
|
{
"start": 1862,
"end": 2129
}
|
class ____(Beta):
@cached_property
@override
def messages(self) -> BetaMessages: # type: ignore[override]
"""Return beta messages resource instance with excluded unsupported endpoints."""
return BetaFoundryMessages(self._client)
|
BetaFoundry
|
python
|
huggingface__transformers
|
tests/models/vipllava/test_modeling_vipllava.py
|
{
"start": 10928,
"end": 12188
}
|
class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
@require_bitsandbytes
def test_small_model_integration_test(self):
model_id = "llava-hf/vip-llava-7b-hf"
model = VipLlavaForConditionalGeneration.from_pretrained(
model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
processor = AutoProcessor.from_pretrained(model_id)
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "USER: <image>\nCan you please describe this image?\nASSISTANT:"
inputs = processor(prompt, image, return_tensors="pt").to(torch_device, torch.float16)
outputs = model.generate(**inputs, max_new_tokens=10)
EXPECTED_OUTPUT = "USER: \nCan you please describe this image?\nASSISTANT: The image features a brown and white cat sitting on"
self.assertEqual(processor.decode(outputs[0], skip_special_tokens=True), EXPECTED_OUTPUT)
|
VipLlavaForConditionalGenerationIntegrationTest
|
python
|
joke2k__faker
|
faker/providers/color/cs_CZ/__init__.py
|
{
"start": 43,
"end": 449
}
|
class ____(ColorProvider):
"""Implement color provider for ``cs_CZ`` locale."""
safe_colors = (
"černá",
"kaštanová",
"zelená",
"námořnická",
"olivová",
"fialová",
"zelenomodrá",
"limetková",
"modrá",
"stříbrná",
"šedá",
"žlutá",
"fuchsiová",
"aquamarinová",
"bílá",
)
|
Provider
|
python
|
PrefectHQ__prefect
|
tests/results/test_flow_results.py
|
{
"start": 692,
"end": 14033
}
|
class ____(Serializer):
"""
Custom serializer for test coverage of user-defined serializers
"""
type: str = "int-custom"
def dumps(self, obj: int):
return obj.to_bytes(8, byteorder="little")
def loads(self, blob):
return int.from_bytes(blob, byteorder="little")
async def test_flow_with_unpersisted_result(prefect_client):
@flow(persist_result=False)
def foo():
return 1
state = foo(return_state=True)
assert await state.result() == 1
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
with pytest.raises(MissingResult):
await api_state.result()
async def test_flow_with_uncached_and_unpersisted_null_result(prefect_client):
@flow(persist_result=False, cache_result_in_memory=False)
def foo():
return None
state = foo(return_state=True)
# Nulls do not consume memory and are still available
assert await state.result() is None
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
with pytest.raises(MissingResult):
await api_state.result()
async def test_flow_with_uncached_but_persisted_result(prefect_client):
store = None
@flow(persist_result=True, cache_result_in_memory=False)
def foo():
nonlocal store
store = get_result_store()
return 1
state = foo(return_state=True)
assert state.data.metadata.storage_key not in store.cache
assert await state.result() == 1
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
async def test_flow_result_missing_with_null_return(prefect_client):
@flow(persist_result=False)
def foo():
return None
state = foo(return_state=True)
assert await state.result() is None
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
with pytest.raises(MissingResult):
await api_state.result()
@pytest.mark.parametrize("value", [True, False, None])
async def test_flow_literal_result_is_available_but_not_serialized_or_persisted(
prefect_client, value
):
@flow(
persist_result=True,
result_serializer="pickle",
)
def foo():
return value
state = foo(return_state=True)
assert await state.result() is value
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
assert await api_state.result() is value
async def test_flow_exception_is_persisted(prefect_client):
@flow(persist_result=True)
def foo():
raise ValueError("Hello world")
state = foo(return_state=True)
with pytest.raises(ValueError, match="Hello world"):
await state.result()
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
with pytest.raises(ValueError, match="Hello world"):
await api_state.result()
@pytest.mark.parametrize(
"serializer",
[
"json",
"pickle",
JSONSerializer(),
PickleSerializer(),
MyIntSerializer(),
"int-custom",
"compressed/pickle",
"compressed/json",
CompressedSerializer(serializer=MyIntSerializer()),
],
)
async def test_flow_result_serializer(serializer, prefect_client):
@flow(result_serializer=serializer, persist_result=True)
def foo():
return 1
state = foo(return_state=True)
assert await state.result() == 1
await assert_uses_result_serializer(state, serializer, prefect_client)
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
await assert_uses_result_serializer(api_state, serializer, prefect_client)
async def test_flow_result_storage_by_instance(prefect_client):
storage = LocalFileSystem(basepath=PREFECT_HOME.value() / "test-storage")
await storage.save("test-storage-stuff")
@flow(result_storage=storage, persist_result=True)
def foo():
return 1
state = foo(return_state=True)
assert await state.result() == 1
await assert_uses_result_storage(state, storage)
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
await assert_uses_result_storage(api_state, storage)
async def test_flow_result_storage_by_slug(prefect_client):
await LocalFileSystem(basepath=PREFECT_HOME.value() / "test-storage").save("test")
slug = LocalFileSystem.get_block_type_slug() + "/test"
@flow(result_storage=slug, persist_result=True)
def foo():
return 1
state = foo(return_state=True)
assert await state.result() == 1
await assert_uses_result_storage(state, slug, client=prefect_client)
api_state = (
await prefect_client.read_flow_run(state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
await assert_uses_result_storage(api_state, slug, client=prefect_client)
async def test_child_flow_persisted_result_due_to_opt_in(prefect_client):
@flow
def foo():
return bar(return_state=True)
@flow(persist_result=True)
def bar():
return 1
parent_state = foo(return_state=True)
child_state = await parent_state.result()
assert await child_state.result() == 1
api_state = (
await prefect_client.read_flow_run(child_state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
@pytest.mark.parametrize("source", ["child", "parent"])
async def test_child_flow_result_serializer(prefect_client, source):
serializer = "json"
@flow(result_serializer=serializer if source == "parent" else None)
def foo():
return bar(return_state=True)
@flow(
result_serializer=serializer if source == "child" else None,
persist_result=True,
)
def bar():
return 1
parent_state = foo(return_state=True)
child_state = await parent_state.result()
assert await child_state.result() == 1
await assert_uses_result_serializer(child_state, serializer, prefect_client)
api_state = (
await prefect_client.read_flow_run(child_state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
await assert_uses_result_serializer(api_state, serializer, prefect_client)
@pytest.mark.parametrize("source", ["child", "parent"])
async def test_child_flow_result_storage(prefect_client, source):
storage = LocalFileSystem(basepath=PREFECT_HOME.value() / "test-storage")
await storage.save("child-flow-test")
@flow(result_storage=storage if source == "parent" else None)
def foo():
return bar(return_state=True)
@flow(result_storage=storage if source == "child" else None, persist_result=True)
def bar():
return 1
parent_state = foo(return_state=True)
child_state = await parent_state.result()
assert await child_state.result() == 1
await assert_uses_result_storage(child_state, storage)
api_state = (
await prefect_client.read_flow_run(child_state.state_details.flow_run_id)
).state
assert await api_state.result() == 1
await assert_uses_result_storage(api_state, storage)
async def test_child_flow_result_missing_with_null_return(prefect_client):
@flow
def foo():
return bar(return_state=True)
@flow(persist_result=False)
def bar():
return None
parent_state = foo(return_state=True)
child_state = await parent_state.result()
assert isinstance(child_state.data, ResultRecord)
assert await child_state.result() is None
api_state = (
await prefect_client.read_flow_run(child_state.state_details.flow_run_id)
).state
with pytest.raises(MissingResult):
await api_state.result()
@pytest.mark.parametrize("empty_type", [dict, list])
@pytest.mark.parametrize("persist_result", [True, False])
def test_flow_empty_result_is_retained(
persist_result: bool, empty_type, tmp_path: Path
):
@flow(persist_result=persist_result)
def my_flow():
return empty_type()
result = my_flow()
assert result == empty_type()
@pytest.mark.parametrize(
"resultlike",
[
{"type": "foo"},
{"type": "literal", "user-stuff": "bar"},
{"type": "persisted"},
{"type": "persisted", "value": "test"},
{"type": "unpersisted"},
],
)
@pytest.mark.parametrize("persist_result", [True, False])
def test_flow_resultlike_result_is_retained(
persist_result: bool, resultlike, tmp_path: Path
):
"""
Since Pydantic will coerce dictionaries into `BaseResult` types, we need to be sure
that user dicts that look like a bit like results do not cause problems
"""
@flow(persist_result=persist_result)
def my_flow():
return resultlike
result = my_flow()
assert result == resultlike
async def test_root_flow_default_remote_storage(tmp_path: Path):
@flow
async def foo():
result_fac = get_run_context().result_store
return result_fac.result_storage
block = LocalFileSystem(basepath=tmp_path)
await block.save("my-result-storage")
with temporary_settings(
{
PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: "local-file-system/my-result-storage",
}
):
storage_block = await foo()
assert_blocks_equal(storage_block, block)
assert storage_block._is_anonymous is False
async def test_root_flow_default_remote_storage_saves_correct_result(tmp_path):
await LocalFileSystem(basepath=tmp_path).save("my-result-storage")
@task(result_storage_key="my-result.pkl", persist_result=True)
async def bar():
return {"foo": "bar"}
@flow
async def foo():
return await bar()
with temporary_settings(
{
PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: "local-file-system/my-result-storage",
}
):
result = await foo()
assert result == {"foo": "bar"}
local_storage = await LocalFileSystem.load("my-result-storage")
result_bytes = await local_storage.read_path(f"{tmp_path / 'my-result.pkl'}")
saved_python_result = ResultRecord.deserialize(result_bytes).result
assert saved_python_result == {"foo": "bar"}
async def test_root_flow_nonexistent_default_storage_block_fails():
@flow
async def foo():
result_fac = get_run_context().result_store
return result_fac.result_storage
with temporary_settings(
{
PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: "fake-block-type-slug/my-result-storage",
}
):
with pytest.raises(
ValueError,
match="Unable to find block document",
):
await foo()
async def test_root_flow_explicit_result_storage_settings_overrides_default():
await LocalFileSystem(basepath="~/.prefect/results").save("explicit-storage")
await LocalFileSystem(basepath="~/.prefect/other-results").save(
"default-result-storage"
)
@flow(result_storage=await LocalFileSystem.load("explicit-storage"))
async def foo():
return get_run_context().result_store.result_storage
with temporary_settings(
{
PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: (
"local-file-system/default-result-storage"
),
}
):
result = await foo()
assert_blocks_equal(result, await LocalFileSystem.load("explicit-storage"))
def test_flow_version_result_storage_key():
@task(result_storage_key="{prefect.runtime.flow_run.flow_version}")
def some_task():
return "hello"
@flow(version="somespecialflowversion")
def some_flow() -> Block:
some_task()
return get_run_context().result_store.result_storage
storage_block = some_flow()
assert isinstance(storage_block, LocalFileSystem)
result = ResultRecord.deserialize(
storage_block.read_path("somespecialflowversion")
).result
assert result == "hello"
def test_subflow_in_task_uses_own_result_serializer():
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/19449
When a subflow runs inside a task, it should use its own result_serializer,
not inherit from the parent flow.
"""
from prefect.context import FlowRunContext
@flow(result_serializer="json")
def child_flow():
# Both FlowRunContext and get_run_context should return the child's context
flow_ctx = FlowRunContext.get()
run_ctx = get_run_context()
assert flow_ctx is not None
assert run_ctx is not None
# Verify we're in the child flow context
assert flow_ctx.flow.name == "child-flow"
assert run_ctx.flow.name == "child-flow"
# Both should use JSON serializer
assert flow_ctx.result_store.serializer.type == "json"
assert run_ctx.result_store.serializer.type == "json"
return {"message": "child result"}
@flow(result_serializer="pickle")
def parent_flow():
result = task(child_flow)()
return result
parent_flow()
|
MyIntSerializer
|
python
|
pytorch__pytorch
|
torch/nn/modules/activation.py
|
{
"start": 25300,
"end": 25933
}
|
class ____(Module):
r"""Applies the Logsigmoid function element-wise.
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
"""
Run forward pass.
"""
return F.logsigmoid(input)
|
LogSigmoid
|
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 118970,
"end": 119462
}
|
class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
# remote_ip is optional, although it's set by
# both HTTPServer and WSGIAdapter.
# Clobber it to make sure it doesn't break logging.
self.request.remote_ip = None
self.finish(self._request_summary())
def test_missing_remote_ip(self):
resp = self.fetch("/")
self.assertEqual(resp.body, b"GET / (None)")
|
RequestSummaryTest
|
python
|
pytorch__pytorch
|
test/distributions/test_distributions.py
|
{
"start": 185119,
"end": 197192
}
|
class ____(DistributionsTestCase):
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma(self):
num_samples = 100
for alpha in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:
alphas = torch.tensor(
[alpha] * num_samples, dtype=torch.float, requires_grad=True
)
betas = alphas.new_ones(num_samples)
x = Gamma(alphas, betas).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = alphas.grad[ind].numpy()
# Compare with expected gradient dx/dalpha along constant cdf(x,alpha).
cdf = scipy.stats.gamma.cdf
pdf = scipy.stats.gamma.pdf
eps = 0.01 * alpha / (1.0 + alpha**0.5)
cdf_alpha = (cdf(x, alpha + eps) - cdf(x, alpha - eps)) / (2 * eps)
cdf_x = pdf(x, alpha)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(
np.max(rel_error),
0.0005,
"\n".join(
[
f"Bad gradient dx/alpha for x ~ Gamma({alpha}, 1)",
f"x {x}",
f"expected {expected_grad}",
f"actual {actual_grad}",
f"rel error {rel_error}",
f"max error {rel_error.max()}",
f"at alpha={alpha}, x={x[rel_error.argmax()]}",
]
),
)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_chi2(self):
num_samples = 100
for df in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:
dfs = torch.tensor(
[df] * num_samples, dtype=torch.float, requires_grad=True
)
x = Chi2(dfs).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = dfs.grad[ind].numpy()
# Compare with expected gradient dx/ddf along constant cdf(x,df).
cdf = scipy.stats.chi2.cdf
pdf = scipy.stats.chi2.pdf
eps = 0.01 * df / (1.0 + df**0.5)
cdf_df = (cdf(x, df + eps) - cdf(x, df - eps)) / (2 * eps)
cdf_x = pdf(x, df)
expected_grad = -cdf_df / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(
np.max(rel_error),
0.001,
"\n".join(
[
f"Bad gradient dx/ddf for x ~ Chi2({df})",
f"x {x}",
f"expected {expected_grad}",
f"actual {actual_grad}",
f"rel error {rel_error}",
f"max error {rel_error.max()}",
]
),
)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_on_diagonal(self):
num_samples = 20
grid = [1e-1, 1e0, 1e1]
for a0, a1, a2 in product(grid, grid, grid):
alphas = torch.tensor(
[[a0, a1, a2]] * num_samples, dtype=torch.float, requires_grad=True
)
x = Dirichlet(alphas).rsample()[:, 0]
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = alphas.grad[ind].numpy()[:, 0]
# Compare with expected gradient dx/dalpha0 along constant cdf(x,alpha).
# This reduces to a distribution Beta(alpha[0], alpha[1] + alpha[2]).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
alpha, beta = a0, a1 + a2
eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
cdf_alpha = (cdf(x, alpha + eps, beta) - cdf(x, alpha - eps, beta)) / (
2 * eps
)
cdf_x = pdf(x, alpha, beta)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(
np.max(rel_error),
0.001,
"\n".join(
[
f"Bad gradient dx[0]/dalpha[0] for Dirichlet([{a0}, {a1}, {a2}])",
f"x {x}",
f"expected {expected_grad}",
f"actual {actual_grad}",
f"rel error {rel_error}",
f"max error {rel_error.max()}",
f"at x={x[rel_error.argmax()]}",
]
),
)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_wrt_alpha(self):
num_samples = 20
grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]
for con1, con0 in product(grid, grid):
con1s = torch.tensor(
[con1] * num_samples, dtype=torch.float, requires_grad=True
)
con0s = con1s.new_tensor([con0] * num_samples)
x = Beta(con1s, con0s).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = con1s.grad[ind].numpy()
# Compare with expected gradient dx/dcon1 along constant cdf(x,con1,con0).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
eps = 0.01 * con1 / (1.0 + np.sqrt(con1))
cdf_alpha = (cdf(x, con1 + eps, con0) - cdf(x, con1 - eps, con0)) / (
2 * eps
)
cdf_x = pdf(x, con1, con0)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(
np.max(rel_error),
0.005,
"\n".join(
[
f"Bad gradient dx/dcon1 for x ~ Beta({con1}, {con0})",
f"x {x}",
f"expected {expected_grad}",
f"actual {actual_grad}",
f"rel error {rel_error}",
f"max error {rel_error.max()}",
f"at x = {x[rel_error.argmax()]}",
]
),
)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_wrt_beta(self):
num_samples = 20
grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]
for con1, con0 in product(grid, grid):
con0s = torch.tensor(
[con0] * num_samples, dtype=torch.float, requires_grad=True
)
con1s = con0s.new_tensor([con1] * num_samples)
x = Beta(con1s, con0s).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = con0s.grad[ind].numpy()
# Compare with expected gradient dx/dcon0 along constant cdf(x,con1,con0).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
eps = 0.01 * con0 / (1.0 + np.sqrt(con0))
cdf_beta = (cdf(x, con1, con0 + eps) - cdf(x, con1, con0 - eps)) / (2 * eps)
cdf_x = pdf(x, con1, con0)
expected_grad = -cdf_beta / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(
np.max(rel_error),
0.005,
"\n".join(
[
f"Bad gradient dx/dcon0 for x ~ Beta({con1}, {con0})",
f"x {x}",
f"expected {expected_grad}",
f"actual {actual_grad}",
f"rel error {rel_error}",
f"max error {rel_error.max()}",
f"at x = {x[rel_error.argmax()]!r}",
]
),
)
def test_dirichlet_multivariate(self):
alpha_crit = 0.25 * (5.0**0.5 - 1.0)
num_samples = 100000
for shift in [-0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.10]:
alpha = alpha_crit + shift
alpha = torch.tensor([alpha], dtype=torch.float, requires_grad=True)
alpha_vec = torch.cat([alpha, alpha, alpha.new([1])])
z = Dirichlet(alpha_vec.expand(num_samples, 3)).rsample()
mean_z3 = 1.0 / (2.0 * alpha + 1.0)
loss = torch.pow(z[:, 2] - mean_z3, 2.0).mean()
actual_grad = grad(loss, [alpha])[0]
# Compute expected gradient by hand.
num = 1.0 - 2.0 * alpha - 4.0 * alpha**2
den = (1.0 + alpha) ** 2 * (1.0 + 2.0 * alpha) ** 3
expected_grad = num / den
self.assertEqual(
actual_grad,
expected_grad,
atol=0.002,
rtol=0,
msg="\n".join(
[
"alpha = alpha_c + %.2g" % shift, # noqa: UP031
"expected_grad: %.5g" % expected_grad, # noqa: UP031
"actual_grad: %.5g" % actual_grad, # noqa: UP031
"error = %.2g" # noqa: UP031
% torch.abs(expected_grad - actual_grad).max(), # noqa: UP031
]
),
)
@set_default_dtype(torch.double)
def test_dirichlet_tangent_field(self):
num_samples = 20
alpha_grid = [0.5, 1.0, 2.0]
# v = dx/dalpha[0] is the reparameterized gradient aka tangent field.
def compute_v(x, alpha):
return torch.stack(
[
_Dirichlet_backward(x, alpha, torch.eye(3, 3)[i].expand_as(x))[:, 0]
for i in range(3)
],
dim=-1,
)
for a1, a2, a3 in product(alpha_grid, alpha_grid, alpha_grid):
alpha = torch.tensor([a1, a2, a3], requires_grad=True).expand(
num_samples, 3
)
x = Dirichlet(alpha).rsample()
dlogp_da = grad(
[Dirichlet(alpha).log_prob(x.detach()).sum()],
[alpha],
retain_graph=True,
)[0][:, 0]
dlogp_dx = grad(
[Dirichlet(alpha.detach()).log_prob(x).sum()], [x], retain_graph=True
)[0]
v = torch.stack(
[
grad([x[:, i].sum()], [alpha], retain_graph=True)[0][:, 0]
for i in range(3)
],
dim=-1,
)
# Compute ramaining properties by finite difference.
self.assertEqual(compute_v(x, alpha), v, msg="Bug in compute_v() helper")
# dx is an arbitrary orthonormal basis tangent to the simplex.
dx = torch.tensor([[2.0, -1.0, -1.0], [0.0, 1.0, -1.0]])
dx /= dx.norm(2, -1, True)
eps = 1e-2 * x.min(-1, True)[0] # avoid boundary
dv0 = (
compute_v(x + eps * dx[0], alpha) - compute_v(x - eps * dx[0], alpha)
) / (2 * eps)
dv1 = (
compute_v(x + eps * dx[1], alpha) - compute_v(x - eps * dx[1], alpha)
) / (2 * eps)
div_v = (dv0 * dx[0] + dv1 * dx[1]).sum(-1)
# This is a modification of the standard continuity equation, using the product rule to allow
# expression in terms of log_prob rather than the less numerically stable log_prob.exp().
error = dlogp_da + (dlogp_dx * v).sum(-1) + div_v
self.assertLess(
torch.abs(error).max(),
0.005,
"\n".join(
[
f"Dirichlet([{a1}, {a2}, {a3}]) gradient violates continuity equation:",
f"error = {error}",
]
),
)
|
TestRsample
|
python
|
ansible__ansible
|
lib/ansible/module_utils/compat/version.py
|
{
"start": 1399,
"end": 3429
}
|
class ____:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes; and route
rich comparisons to _cmp.
"""
def __init__(self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__(self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
def __eq__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c == 0
def __lt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c >= 0
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# _cmp (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
|
Version
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py
|
{
"start": 383,
"end": 5817
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'when_deleted': 'str',
'when_scaled': 'str'
}
attribute_map = {
'when_deleted': 'whenDeleted',
'when_scaled': 'whenScaled'
}
def __init__(self, when_deleted=None, when_scaled=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetPersistentVolumeClaimRetentionPolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._when_deleted = None
self._when_scaled = None
self.discriminator = None
if when_deleted is not None:
self.when_deleted = when_deleted
if when_scaled is not None:
self.when_scaled = when_scaled
@property
def when_deleted(self):
"""Gets the when_deleted of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted. # noqa: E501
:return: The when_deleted of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
:rtype: str
"""
return self._when_deleted
@when_deleted.setter
def when_deleted(self, when_deleted):
"""Sets the when_deleted of this V1StatefulSetPersistentVolumeClaimRetentionPolicy.
WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted. # noqa: E501
:param when_deleted: The when_deleted of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
:type: str
"""
self._when_deleted = when_deleted
@property
def when_scaled(self):
"""Gets the when_scaled of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted. # noqa: E501
:return: The when_scaled of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
:rtype: str
"""
return self._when_scaled
@when_scaled.setter
def when_scaled(self, when_scaled):
"""Sets the when_scaled of this V1StatefulSetPersistentVolumeClaimRetentionPolicy.
WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted. # noqa: E501
:param when_scaled: The when_scaled of this V1StatefulSetPersistentVolumeClaimRetentionPolicy. # noqa: E501
:type: str
"""
self._when_scaled = when_scaled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetPersistentVolumeClaimRetentionPolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetPersistentVolumeClaimRetentionPolicy):
return True
return self.to_dict() != other.to_dict()
|
V1StatefulSetPersistentVolumeClaimRetentionPolicy
|
python
|
pytorch__pytorch
|
torch/export/graph_signature.py
|
{
"start": 1587,
"end": 2525
}
|
class ____:
kind: InputKind
arg: ArgumentSpec
target: Optional[str]
persistent: Optional[bool] = None
def __post_init__(self):
if self.kind == InputKind.BUFFER:
assert self.persistent is not None, (
"Failed to specify persistent flag on BUFFER."
)
assert isinstance(
self.arg,
(
TensorArgument,
SymIntArgument,
SymFloatArgument,
SymBoolArgument,
ConstantArgument,
CustomObjArgument,
TokenArgument,
),
), f"got {type(self.arg)}"
def __str__(self):
target = "" if self.target is None else f" target='{self.target}'"
persistent = "" if self.persistent is None else f" persistent={self.persistent}"
return f"{str(self.arg.name)}: {str(self.kind.name)}{target}{persistent}"
|
InputSpec
|
python
|
scikit-learn__scikit-learn
|
sklearn/externals/_arff.py
|
{
"start": 15224,
"end": 15791
}
|
class ____:
def __init__(self, values):
self.values = set(values)
self.zero_value = values[0]
def __call__(self, value):
if value not in self.values:
if value == 0:
# Sparse decode
# See issue #52: nominals should take their first value when
# unspecified in a sparse matrix. Naturally, this is consistent
# with EncodedNominalConversor.
return self.zero_value
raise BadNominalValue(value)
return str(value)
|
NominalConversor
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 30793,
"end": 30879
}
|
class ____(admin.ModelAdmin):
autocomplete_fields = ["living_country"]
|
TravelerAdmin
|
python
|
python-openxml__python-docx
|
src/docx/types.py
|
{
"start": 256,
"end": 510
}
|
class ____(Protocol):
"""An object that provides access to the StoryPart.
This type is for objects that have a story part like document or header as their
root part.
"""
@property
def part(self) -> StoryPart: ...
|
ProvidesStoryPart
|
python
|
huggingface__transformers
|
src/transformers/models/starcoder2/configuration_starcoder2.py
|
{
"start": 878,
"end": 8144
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b) model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49152):
Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Starcoder2Model`]
hidden_size (`int`, *optional*, defaults to 3072):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 30):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 24):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_epsilon (`float`, *optional*, defaults to 1e-05):
Epsilon value for the layer norm
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 50256):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 50256):
The id of the "end-of-sequence" token.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `None` (no sliding window).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
residual_dropout (`float`, *optional*, defaults to 0.0):
Residual connection dropout value.
embedding_dropout (`float`, *optional*, defaults to 0.0):
Embedding dropout.
use_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias term on linear layers of the model.
```python
>>> from transformers import Starcoder2Model, Starcoder2Config
>>> # Initializing a Starcoder2 7B style configuration
>>> configuration = Starcoder2Config()
>>> # Initializing a model from the Starcoder2 7B style configuration
>>> model = Starcoder2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "starcoder2"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Starcoder2`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.c_fc": "colwise",
"layers.*.mlp.c_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 49152,
hidden_size: Optional[int] = 3072,
intermediate_size: Optional[int] = 12288,
num_hidden_layers: Optional[int] = 30,
num_attention_heads: Optional[int] = 24,
num_key_value_heads: Optional[int] = 2,
hidden_act: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 4096,
initializer_range: Optional[float] = 0.018042,
norm_epsilon: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
bos_token_id: Optional[int] = 50256,
eos_token_id: Optional[int] = 50256,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
sliding_window: Optional[int] = None,
attention_dropout: Optional[float] = 0.0,
residual_dropout: Optional[float] = 0.0,
embedding_dropout: Optional[float] = 0.0,
use_bias: Optional[bool] = True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.use_bias = use_bias
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.norm_epsilon = norm_epsilon
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.embedding_dropout = embedding_dropout
self.rope_parameters = rope_parameters
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
__all__ = ["Starcoder2Config"]
|
Starcoder2Config
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance6.py
|
{
"start": 1654,
"end": 1696
}
|
class ____(Generic[_T1]):
x: _T1
|
ParentD
|
python
|
astropy__astropy
|
astropy/table/tests/test_init_table.py
|
{
"start": 910,
"end": 2054
}
|
class ____:
def test_init(self):
"""Test initialisation with lists, tuples, dicts of arrays
rather than Columns [regression test for #2647]"""
x1 = np.arange(10.0)
x2 = np.arange(5.0)
x3 = np.arange(7.0)
col_list = [("x1", x1), ("x2", x2), ("x3", x3)]
tc_list = TableColumns(col_list)
for col in col_list:
assert col[0] in tc_list
assert tc_list[col[0]] is col[1]
col_tuple = (("x1", x1), ("x2", x2), ("x3", x3))
tc_tuple = TableColumns(col_tuple)
for col in col_tuple:
assert col[0] in tc_tuple
assert tc_tuple[col[0]] is col[1]
col_dict = {"x1": x1, "x2": x2, "x3": x3}
tc_dict = TableColumns(col_dict)
for col in tc_dict.keys():
assert col in tc_dict
assert tc_dict[col] is col_dict[col]
columns = [Column(col[1], name=col[0]) for col in col_list]
tc = TableColumns(columns)
for col in columns:
assert col.name in tc
assert tc[col.name] is col
# pytest.mark.usefixtures('table_type')
|
TestTableColumnsInit
|
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/config_generator/utils/prompt.py
|
{
"start": 251,
"end": 465
}
|
class ____(IntPrompt):
@classmethod
def ask(cls, prompt: str, **kwargs):
# Automatically apply bold style to the BoldPrompt
return IntPrompt.ask(f"[bold]{prompt}[/bold]", **kwargs)
|
BoldIntPrompt
|
python
|
sympy__sympy
|
sympy/solvers/ode/single.py
|
{
"start": 53884,
"end": 58475
}
|
class ____(SinglePatternODESolver):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution `u_2 = \frac{\text{<independent
variable>}}{\text{<dependent variable>}}`.
This is a differential equation
.. math:: P(x, y) + Q(x, y) dy/dx = 0
such that `P` and `Q` are homogeneous and of the same order. A function
`F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See
also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
If the coefficients `P` and `Q` in the differential equation above are
homogeneous functions of the same order, then it can be shown that the
substitution `x = u_2 y` (i.e. `u_2 = x/y`) will turn the differential
equation into an equation separable in the variables `y` and `u_2`. If
`h(u_2)` is the function that results from making the substitution `u_2 =
x/f(x)` on `P(x, f(x))` and `g(u_2)` is the function that results from the
substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
Q(x, f(x)) f'(x) = 0`, then the general solution is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
>>> pprint(genform)
/ x \ / x \ d
g|----| + h|----|*--(f(x))
\f(x)/ \f(x)/ dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
x
----
f(x)
/
|
| -g(u1)
| ---------------- d(u1)
| u1*g(u1) + h(u1)
|
/
<BLANKLINE>
f(x) = C1*e
Where `u_1 g(u_1) + h(u_1) \ne 0` and `f(x) \ne 0`.
See also the docstrings of
:obj:`~sympy.solvers.ode.single.HomogeneousCoeffBest` and
:obj:`~sympy.solvers.ode.single.HomogeneousCoeffSubsDepDivIndep`.
Examples
========
>>> from sympy import Function, pprint, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep',
... simplify=False))
/ 2 \
|3*x |
log|----- + 1|
| 2 |
\f (x) /
log(f(x)) = log(C1) - --------------
3
References
==========
- https://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
hint = "1st_homogeneous_coeff_subs_indep_div_dep"
has_integral = True
order = [1]
def _wilds(self, f, x, order):
d = Wild('d', exclude=[f(x).diff(x), f(x).diff(x, 2)])
e = Wild('e', exclude=[f(x).diff(x)])
return d, e
def _equation(self, fx, x, order):
d, e = self.wilds()
return d + e*fx.diff(x)
def _verify(self, fx):
self.d, self.e = self.wilds_match()
self.y = Dummy('y')
x = self.ode_problem.sym
self.d = separatevars(self.d.subs(fx, self.y))
self.e = separatevars(self.e.subs(fx, self.y))
ordera = homogeneous_order(self.d, x, self.y)
orderb = homogeneous_order(self.e, x, self.y)
if ordera == orderb and ordera is not None:
self.u = Dummy('u')
if simplify((self.e + self.u*self.d).subs({x: self.u, self.y: 1})) != 0:
return True
return False
return False
def _get_match_object(self):
fx = self.ode_problem.func
x = self.ode_problem.sym
self.u1 = Dummy('u1')
xarg = 0
yarg = 0
return [self.d, self.e, fx, x, self.u, self.u1, self.y, xarg, yarg]
def _get_general_solution(self, *, simplify_flag: bool = True):
d, e, fx, x, u, u1, y, xarg, yarg = self._get_match_object()
(C1,) = self.ode_problem.get_numbered_constants(num=1)
int = Integral(simplify((-d/(e + u1*d)).subs({x: u1, y: 1})), (u1, None, x/fx)) # type: ignore
sol = logcombine(Eq(log(fx), int + log(C1)), force=True)
gen_sol = sol.subs(fx, u).subs(((u, u - yarg), (x, x - xarg), (u, fx)))
return [gen_sol]
|
HomogeneousCoeffSubsIndepDivDep
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 1054193,
"end": 1054419
}
|
class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (CreatedRepositoryContribution, RestrictedContribution)
|
CreatedRepositoryOrRestrictedContribution
|
python
|
huggingface__transformers
|
src/transformers/models/sew/modular_sew.py
|
{
"start": 1650,
"end": 1718
}
|
class ____(Wav2Vec2GroupNormConvLayer):
pass
|
SEWGroupNormConvLayer
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/time_input_test.py
|
{
"start": 1164,
"end": 11559
}
|
class ____(DeltaGeneratorTestCase):
"""Test ability to marshall time_input protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.time_input("the label")
c = self.get_delta_from_queue().new_element.time_input
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert datetime.strptime(c.default, "%H:%M").time() <= datetime.now().time()
assert c.HasField("default")
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.time_input("the label", disabled=True)
c = self.get_delta_from_queue().new_element.time_input
assert c.disabled
def test_none_value(self):
"""Test that it can be called with None as initial value."""
st.time_input("the label", value=None)
c = self.get_delta_from_queue().new_element.time_input
assert c.label == "the label"
# If a proto property is null is not determined by this value,
# but by the check via the HasField method:
assert c.default == ""
assert not c.HasField("default")
@parameterized.expand(
[
(time(8, 45), "08:45"),
(datetime(2019, 7, 6, 21, 15), "21:15"),
("21:15:00", "21:15"),
("21:15:10.123", "21:15"),
("2019-07-06 21:15:10.123", "21:15"),
]
)
def test_value_types(self, arg_value, proto_value):
"""Test that it supports different types of values."""
st.time_input("the label", arg_value)
c = self.get_delta_from_queue().new_element.time_input
assert c.label == "the label"
assert c.default == proto_value
def test_inside_column(self):
"""Test that it works correctly inside of a column."""
col1, _ = st.columns([3, 2])
with col1:
st.time_input("foo")
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
time_input_proto = self.get_delta_from_queue().new_element.time_input
assert time_input_proto.label == "foo"
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.time_input("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.time_input
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.time_input("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_st_time_input(self):
"""Test st.time_input."""
value = time(8, 45)
st.time_input("Set an alarm for", value)
el = self.get_delta_from_queue().new_element
assert el.time_input.default == "08:45"
assert el.time_input.step == timedelta(minutes=15).seconds
def test_st_time_input_with_step(self):
"""Test st.time_input with step."""
value = time(9, 00)
st.time_input("Set an alarm for", value, step=timedelta(minutes=5))
el = self.get_delta_from_queue().new_element
assert el.time_input.default == "09:00"
assert el.time_input.step == timedelta(minutes=5).seconds
def test_st_time_input_exceptions(self):
"""Test st.time_input exceptions."""
value = time(9, 00)
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=True)
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=(90, 0))
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=1)
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=59)
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=timedelta(hours=24))
with pytest.raises(StreamlitAPIException):
st.time_input("Set an alarm for", value, step=timedelta(days=1))
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.time_input("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_width_config_default(self):
"""Test that default width is 'stretch'."""
st.time_input("the label")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
def test_width_config_pixel(self):
"""Test that pixel width works properly."""
st.time_input("the label", width=200)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 200
def test_width_config_stretch(self):
"""Test that 'stretch' width works properly."""
st.time_input("the label", width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
[
"invalid",
-100,
0,
100.5,
None,
]
)
def test_invalid_width(self, width):
"""Test that invalid width values raise exceptions."""
with pytest.raises(StreamlitInvalidWidthError):
st.time_input("the label", width=width)
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params (keep whitelisted kwargs stable)
st.time_input(
label="Label 1",
key="time_input_key",
value=time(8, 45),
help="Help 1",
disabled=False,
width="stretch",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
# Whitelisted kwargs:
step=timedelta(minutes=15),
)
c1 = self.get_delta_from_queue().new_element.time_input
id1 = c1.id
# Second render with different non-whitelisted params but same key
st.time_input(
label="Label 2",
key="time_input_key",
value=time(9, 0),
help="Help 2",
disabled=True,
width=200,
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
# Keep whitelisted the same to ensure ID stability
step=timedelta(minutes=15),
)
c2 = self.get_delta_from_queue().new_element.time_input
id2 = c2.id
assert id1 == id2
@parameterized.expand(
[
("step", timedelta(minutes=15), timedelta(minutes=5)),
]
)
def test_whitelisted_stable_key_kwargs(self, kwarg_name, value1, value2):
"""Test that the widget ID changes when a whitelisted kwarg changes even when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs = {
"label": "Label",
"key": "time_input_key",
# keep other params stable
"value": time(8, 45),
"step": value1,
}
st.time_input(**base_kwargs)
c1 = self.get_delta_from_queue().new_element.time_input
id1 = c1.id
base_kwargs[kwarg_name] = value2
st.time_input(**base_kwargs)
c2 = self.get_delta_from_queue().new_element.time_input
id2 = c2.id
assert id1 != id2
def test_time_input_interaction():
"""Test interactions with an empty time_input widget."""
def script():
import streamlit as st
st.time_input("the label", value=None)
at = AppTest.from_function(script).run()
time_input = at.time_input[0]
assert time_input.value is None
# Input a time:
at = time_input.set_value(time(8, 45)).run()
time_input = at.time_input[0]
assert time_input.value == time(8, 45)
# # Clear the value
at = time_input.set_value(None).run()
time_input = at.time_input[0]
assert time_input.value is None
def test_None_session_state_value_retained():
def script():
import streamlit as st
if "time_input" not in st.session_state:
st.session_state["time_input"] = None
st.time_input("time_input", key="time_input")
st.button("button")
at = AppTest.from_function(script).run()
at = at.button[0].click().run()
assert at.time_input[0].value is None
|
TimeInputTest
|
python
|
pallets__werkzeug
|
src/werkzeug/exceptions.py
|
{
"start": 8078,
"end": 10682
}
|
class ____(HTTPException):
"""*401* ``Unauthorized``
Raise if the user is not authorized to access a resource.
The ``www_authenticate`` argument should be used to set the
``WWW-Authenticate`` header. This is used for HTTP basic auth and
other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
to create correctly formatted values. Strictly speaking a 401
response is invalid if it doesn't provide at least one value for
this header, although real clients typically don't care.
:param description: Override the default message used for the body
of the response.
:param www-authenticate: A single value, or list of values, for the
WWW-Authenticate header(s).
.. versionchanged:: 2.0
Serialize multiple ``www_authenticate`` items into multiple
``WWW-Authenticate`` headers, rather than joining them
into a single value, for better interoperability.
.. versionchanged:: 0.15.3
If the ``www_authenticate`` argument is not set, the
``WWW-Authenticate`` header is not set.
.. versionchanged:: 0.15.3
The ``response`` argument was restored.
.. versionchanged:: 0.15.1
``description`` was moved back as the first argument, restoring
its previous position.
.. versionchanged:: 0.15.0
``www_authenticate`` was added as the first argument, ahead of
``description``.
"""
code = 401
description = (
"The server could not verify that you are authorized to access"
" the URL requested. You either supplied the wrong credentials"
" (e.g. a bad password), or your browser doesn't understand"
" how to supply the credentials required."
)
def __init__(
self,
description: str | None = None,
response: SansIOResponse | None = None,
www_authenticate: None | (WWWAuthenticate | t.Iterable[WWWAuthenticate]) = None,
) -> None:
super().__init__(description, response)
from .datastructures import WWWAuthenticate
if isinstance(www_authenticate, WWWAuthenticate):
www_authenticate = (www_authenticate,)
self.www_authenticate = www_authenticate
def get_headers(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> list[tuple[str, str]]:
headers = super().get_headers(environ, scope)
if self.www_authenticate:
headers.extend(("WWW-Authenticate", str(x)) for x in self.www_authenticate)
return headers
|
Unauthorized
|
python
|
crytic__slither
|
slither/printers/inheritance/inheritance.py
|
{
"start": 225,
"end": 2998
}
|
class ____(AbstractPrinter):
ARGUMENT = "inheritance"
HELP = "Print the inheritance relations between contracts"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#inheritance"
def _get_child_contracts(self, base):
# Generate function to get all child contracts of a base contract
for child in self.contracts:
if base in child.inheritance:
yield child
def output(self, filename):
"""
Output the inheritance relation
_filename is not used
Args:
_filename(string)
"""
info = "Inheritance\n"
info += blue("Child_Contract -> ") + green("Immediate_Base_Contracts")
info += green(" [Not_Immediate_Base_Contracts]")
result = {"child_to_base": {}}
for child in self.contracts:
info += blue(f"\n+ {child.name}\n")
result["child_to_base"][child.name] = {"immediate": [], "not_immediate": []}
if child.inheritance:
immediate = child.immediate_inheritance
not_immediate = [i for i in child.inheritance if i not in immediate]
info += " -> " + green(", ".join(map(str, immediate))) + "\n"
result["child_to_base"][child.name]["immediate"] = list(map(str, immediate))
if not_immediate:
info += ", [" + green(", ".join(map(str, not_immediate))) + "]\n"
result["child_to_base"][child.name]["not_immediate"] = list(
map(str, not_immediate)
)
info += green("\n\nBase_Contract -> ") + blue("Immediate_Child_Contracts") + "\n"
info += blue(" [Not_Immediate_Child_Contracts]") + "\n"
result["base_to_child"] = {}
for base in self.contracts:
info += green(f"\n+ {base.name}") + "\n"
children = list(self._get_child_contracts(base))
result["base_to_child"][base.name] = {"immediate": [], "not_immediate": []}
if children:
immediate = [child for child in children if base in child.immediate_inheritance]
not_immediate = [child for child in children if not child in immediate]
info += " -> " + blue(", ".join(map(str, immediate))) + "\n"
result["base_to_child"][base.name]["immediate"] = list(map(str, immediate))
if not_immediate:
info += ", [" + blue(", ".join(map(str, not_immediate))) + "]" + "\n"
result["base_to_child"][base.name]["not_immediate"] = list(map(str, immediate))
self.info(info)
res = self.generate_output(info, additional_fields=result)
return res
|
PrinterInheritance
|
python
|
getsentry__responses
|
responses/tests/test_registries.py
|
{
"start": 3132,
"end": 5811
}
|
class ____:
def test_invocation_index(self):
@responses.activate(registry=OrderedRegistry)
def run():
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
status=666,
)
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
status=667,
)
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
status=668,
)
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
status=669,
)
resp = requests.get("http://twitter.com/api/1/foobar")
assert resp.status_code == 666
resp = requests.get("http://twitter.com/api/1/foobar")
assert resp.status_code == 667
resp = requests.get("http://twitter.com/api/1/foobar")
assert resp.status_code == 668
resp = requests.get("http://twitter.com/api/1/foobar")
assert resp.status_code == 669
run()
assert_reset()
def test_not_match(self):
@responses.activate(registry=OrderedRegistry)
def run():
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
json={"msg": "not found"},
status=667,
)
responses.add(
responses.GET,
"http://twitter.com/api/1/barfoo",
json={"msg": "not found"},
status=404,
)
responses.add(
responses.GET,
"http://twitter.com/api/1/foobar",
json={"msg": "OK"},
status=200,
)
resp = requests.get("http://twitter.com/api/1/foobar")
assert resp.status_code == 667
with pytest.raises(ConnectionError) as excinfo:
requests.get("http://twitter.com/api/1/foobar")
msg = str(excinfo.value)
assert (
"- GET http://twitter.com/api/1/barfoo Next 'Response' in the "
"order doesn't match due to the following reason: URL does not match"
) in msg
run()
assert_reset()
def test_empty_registry(self):
@responses.activate(registry=OrderedRegistry)
def run():
with pytest.raises(ConnectionError):
requests.get("http://twitter.com/api/1/foobar")
run()
assert_reset()
|
TestOrderedRegistry
|
python
|
walkccc__LeetCode
|
solutions/235. Lowest Common Ancestor of a Binary Search Tree/235.py
|
{
"start": 0,
"end": 344
}
|
class ____:
def lowestCommonAncestor(
self,
root: 'TreeNode',
p: 'TreeNode',
q: 'TreeNode',
) -> 'TreeNode':
if root.val > max(p.val, q.val):
return self.lowestCommonAncestor(root.left, p, q)
if root.val < min(p.val, q.val):
return self.lowestCommonAncestor(root.right, p, q)
return root
|
Solution
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 43810,
"end": 45030
}
|
class ____(NonStrictDataModel):
"""
:param entries: List of view entries. All tasks must have at least one view.
:type entries: Sequence[ViewEntry]
"""
_schema = {
"properties": {
"entries": {
"description": "List of view entries. All tasks must have at least one view.",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, entries=None, **kwargs):
super(View, self).__init__(**kwargs)
self.entries = entries
@schema_property("entries")
def entries(self):
return self._property_entries
@entries.setter
def entries(self, value):
if value is None:
self._property_entries = None
return
self.assert_isinstance(value, "entries", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [
ViewEntry.from_dict(v) if isinstance(v, dict) else v for v in value
]
else:
self.assert_isinstance(value, "entries", ViewEntry, is_array=True)
self._property_entries = value
|
View
|
python
|
ray-project__ray
|
rllib/examples/algorithms/ppo/benchmark_ppo_mujoco.py
|
{
"start": 2025,
"end": 4330
}
|
class ____(Stopper):
def __init__(self, benchmark_envs):
self.benchmark_envs = benchmark_envs
def __call__(self, trial_id, result):
# Stop training if the mean reward is reached.
if (
result[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN]
>= self.benchmark_envs[result["env"]][
f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}"
]
):
return True
# Otherwise check, if the total number of timesteps is exceeded.
elif (
result[f"{NUM_ENV_STEPS_SAMPLED_LIFETIME}"]
>= self.benchmark_envs[result["env"]][f"{NUM_ENV_STEPS_SAMPLED_LIFETIME}"]
):
return True
# Otherwise continue training.
else:
return False
# Note, this needs to implemented b/c the parent class is abstract.
def stop_all(self):
return False
config = (
PPOConfig()
.environment(env=tune.grid_search(list(benchmark_envs.keys())))
.env_runners(
# Following the paper.
num_env_runners=32,
rollout_fragment_length=512,
)
.learners(
# Let's start with a small number of learner workers and
# add later a tune grid search for these resources.
num_learners=1,
num_gpus_per_learner=1,
)
# TODO (simon): Adjust to new model_config_dict.
.training(
# Following the paper.
lambda_=0.95,
lr=0.0003,
num_epochs=15,
train_batch_size=32 * 512,
minibatch_size=4096,
vf_loss_coeff=0.01,
model={
"fcnet_hiddens": [64, 64],
"fcnet_activation": "tanh",
"vf_share_layers": True,
},
)
.reporting(
metrics_num_episodes_for_smoothing=5,
min_sample_timesteps_per_iteration=1000,
)
.evaluation(
evaluation_duration="auto",
evaluation_interval=1,
evaluation_num_env_runners=1,
evaluation_parallel_to_training=True,
evaluation_config={
"explore": True,
},
)
)
tuner = tune.Tuner(
"PPO",
param_space=config,
run_config=tune.RunConfig(
stop=BenchmarkStopper(benchmark_envs=benchmark_envs),
name="benchmark_ppo_mujoco",
),
)
tuner.fit()
|
BenchmarkStopper
|
python
|
pydantic__pydantic
|
tests/test_type_adapter.py
|
{
"start": 26010,
"end": 26734
}
|
class ____:
x: int
@pytest.mark.parametrize('type_,repr_', [(int, 'int'), (list[int], 'list[int]'), (SimpleDataclass, 'SimpleDataclass')])
def test_ta_repr(type_: Any, repr_: str) -> None:
ta = TypeAdapter(type_)
assert repr(ta) == f'TypeAdapter({repr_})'
def test_correct_frame_used_parametrized(create_module) -> None:
"""https://github.com/pydantic/pydantic/issues/10892"""
@create_module
def module_1() -> None:
from pydantic import TypeAdapter
Any = int # noqa: F841
# 'Any' should resolve to `int`, not `typing.Any`:
ta = TypeAdapter[int]('Any') # noqa: F841
with pytest.raises(ValidationError):
module_1.ta.validate_python('a')
|
SimpleDataclass
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py
|
{
"start": 2401,
"end": 3489
}
|
class ____(BaseModel):
type: Literal["semantic_vad"]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: Optional[bool] = None
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeTranscriptionSessionAudioInputTurnDetection: TypeAlias = Annotated[
Union[ServerVad, SemanticVad, None], PropertyInfo(discriminator="type")
]
|
SemanticVad
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-microsoft-outlook/llama_index/readers/microsoft_outlook/base.py
|
{
"start": 880,
"end": 3948
}
|
class ____(BaseReader):
"""
Outlook local calendar reader for Windows.
Reads events from local copy of Outlook calendar.
"""
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
end_date: Optional[Union[str, datetime.date]] = None,
more_attributes: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from user's local calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from. Defaults to today.
end_date (Optional[Union[str, datetime.date]]): the last date (inclusive) to return events from. Defaults to 2199-01-01.
more_attributes (Optional[ List[str]]): additional attributes to be retrieved from calendar entries. Non-existnat attributes are ignored.
Returns a list of documents sutitable for indexing by llam_index. Always returns Start, End, Subject, Location, and Organizer
attributes and optionally returns additional attributes specified in the more_attributes parameter.
"""
if platform.system().lower() != "windows":
return []
attributes = [
"Start",
"End",
"Subject",
"Location",
"Organizer",
] # base attributes to return
if more_attributes is not None: # if the user has specified more attributes
attributes += more_attributes
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
# Initialize the Outlook application
winstuff = importlib.import_module("win32com.client")
outlook = winstuff.Dispatch("Outlook.Application").GetNamespace("MAPI")
# Get the Calendar folder
calendar_folder = outlook.GetDefaultFolder(9)
# Retrieve calendar items
events = calendar_folder.Items
if not events:
return []
events.Sort("[Start]") # Sort items by start time
numberReturned = 0
results = []
for event in events:
converted_date = datetime.date(
event.Start.year, event.Start.month, event.Start.day
)
if converted_date > start_date: # if past start date
numberReturned += 1
eventstring = ""
for attribute in attributes:
if hasattr(event, attribute):
eventstring += f"{attribute}: {getattr(event, attribute)}, "
results.append(Document(text=eventstring))
if numberReturned >= number_of_results:
break
return results
if __name__ == "__main__":
reader = OutlookLocalCalendarReader()
print(reader.load_data())
|
OutlookLocalCalendarReader
|
python
|
great-expectations__great_expectations
|
tests/expectations/fixtures/expect_column_values_to_equal_three.py
|
{
"start": 1098,
"end": 1346
}
|
class ____(ColumnMapExpectation):
map_metric = "column_values.equal_three"
success_keys = ("mostly",)
def validate_configuration(self, configuration) -> None:
pass # no-op to make test setup easier
|
ExpectColumnValuesToEqualThree
|
python
|
PrefectHQ__prefect
|
tests/server/services/test_scheduler.py
|
{
"start": 18031,
"end": 22110
}
|
class ____:
async def test_tight_loop_by_default(self):
assert RecentDeploymentsScheduler().loop_seconds == 5
async def test_tight_loop_can_be_configured(self):
assert RecentDeploymentsScheduler(loop_seconds=1).loop_seconds == 1
with temporary_settings(
{PREFECT_SERVER_SERVICES_SCHEDULER_RECENT_DEPLOYMENTS_LOOP_SECONDS: 42}
):
assert RecentDeploymentsScheduler().loop_seconds == 42
async def test_schedules_runs_for_recently_created_deployments(
self,
deployment: schemas.core.Deployment,
session: AsyncSession,
db: PrefectDBInterface,
):
recent_scheduler = RecentDeploymentsScheduler()
count_query = (
sa.select(sa.func.count())
.select_from(db.FlowRun)
.where(db.FlowRun.deployment_id == deployment.id)
)
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == 0
await recent_scheduler.start(loops=1)
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == recent_scheduler.min_runs
async def test_schedules_runs_for_recently_updated_deployments(
self,
deployment: schemas.core.Deployment,
session: AsyncSession,
db: PrefectDBInterface,
):
# artificially move the created time back (updated time will still be recent)
await session.execute(
sa.update(db.Deployment)
.where(db.Deployment.id == deployment.id)
.values(
created=datetime.datetime.now(timezone.utc)
- datetime.timedelta(hours=1)
)
)
await session.commit()
count_query = (
sa.select(sa.func.count())
.select_from(db.FlowRun)
.where(db.FlowRun.deployment_id == deployment.id)
)
recent_scheduler = RecentDeploymentsScheduler()
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == 0
await recent_scheduler.start(loops=1)
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == recent_scheduler.min_runs
async def test_schedules_no_runs_for_deployments_updated_a_while_ago(
self,
deployment: schemas.core.Deployment,
session: AsyncSession,
db: PrefectDBInterface,
):
# artificially move the updated time back
await session.execute(
sa.update(db.Deployment)
.where(db.Deployment.id == deployment.id)
.values(
updated=datetime.datetime.now(timezone.utc)
- datetime.timedelta(minutes=1)
)
)
await session.commit()
count_query = (
sa.select(sa.func.count())
.select_from(db.FlowRun)
.where(db.FlowRun.deployment_id == deployment.id)
)
recent_scheduler = RecentDeploymentsScheduler()
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == 0
await recent_scheduler.start(loops=1)
runs_count = (await session.execute(count_query)).scalar()
assert runs_count == 0
async def test_only_looks_at_deployments_with_active_schedules(
self,
session: AsyncSession,
db: PrefectDBInterface,
deployment_without_schedules: schemas.core.Deployment,
deployment_with_inactive_schedules: schemas.core.Deployment,
deployment_with_active_schedules: schemas.core.Deployment,
):
n_runs = await models.flow_runs.count_flow_runs(session=session)
assert n_runs == 0
query = (
RecentDeploymentsScheduler()
._get_select_deployments_to_schedule_query()
.limit(10)
)
deployment_ids = (await session.execute(query)).scalars().all()
assert len(deployment_ids) == 1
assert deployment_ids[0] == deployment_with_active_schedules.id
|
TestRecentDeploymentsScheduler
|
python
|
walkccc__LeetCode
|
solutions/2782. Number of Unique Categories/2782.py
|
{
"start": 130,
"end": 407
}
|
class ____:
def numberOfCategories(
self,
n: int,
categoryHandler: Optional['CategoryHandler'],
) -> int:
ans = 0
for i in range(n):
if not any(categoryHandler.haveSameCategory(i, j) for j in range(i)):
ans += 1
return ans
|
Solution
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/base.py
|
{
"start": 667,
"end": 22448
}
|
class ____(Pipeline):
"""Base class for all pipeline objects.
Notes
-----
This class should not be instantiated, only subclassed."""
__metaclass__ = ABCMeta
def __init__(
self,
config=None,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
steps=None,
dataset_properties=None,
include=None,
exclude=None,
random_state=None,
init_params=None,
):
self.init_params = init_params if init_params is not None else {}
self.include = include if include is not None else {}
self.exclude = exclude if exclude is not None else {}
self.dataset_properties = (
dataset_properties if dataset_properties is not None else {}
)
self.random_state = random_state
self.feat_type = feat_type
if steps is None:
self.steps = self._get_pipeline_steps(
feat_type=feat_type, dataset_properties=dataset_properties
)
else:
self.steps = steps
self._validate_include_exclude_params()
self.config_space = self.get_hyperparameter_search_space(feat_type=feat_type)
if config is None:
self.config = self.config_space.get_default_configuration()
else:
if isinstance(config, dict):
config = Configuration(self.config_space, config)
if self.config_space != config.configuration_space:
print(self.config_space._children)
print(config.configuration_space._children)
import difflib
diff = difflib.unified_diff(
str(self.config_space).splitlines(),
str(config.configuration_space).splitlines(),
)
diff = "\n".join(diff)
raise ValueError(
"Configuration passed does not come from the "
"same configuration space. Differences are: "
"%s" % diff
)
self.config = config
self.set_hyperparameters(
self.config, feat_type=feat_type, init_params=init_params
)
super().__init__(steps=self.steps)
self._additional_run_info = {}
def fit(self, X, y, **fit_params):
"""Fit the selected algorithm to the training data.
Parameters
----------
X : array-like or sparse, shape = (n_samples, n_features)
Training data. The preferred type of the matrix (dense or sparse)
depends on the estimator selected.
y : array-like
Targets
fit_params : dict
See the documentation of sklearn.pipeline.Pipeline for formatting
instructions.
Returns
-------
self : returns an instance of self.
Raises
------
NoModelException
NoModelException is raised if fit() is called without specifying
a classification algorithm first.
"""
X, fit_params = self.fit_transformer(X, y, **fit_params)
self.fit_estimator(X, y, **fit_params)
return self
def fit_transformer(self, X, y, fit_params=None):
self.num_targets = 1 if len(y.shape) == 1 else y.shape[1]
if fit_params is None:
fit_params = {}
fit_params = {
key.replace(":", "__"): value for key, value in fit_params.items()
}
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit_estimator(self, X, y, **fit_params):
fit_params = {
key.replace(":", "__"): value for key, value in fit_params.items()
}
self._final_estimator.fit(X, y, **fit_params)
return self
def iterative_fit(self, X, y, n_iter=1, **fit_params):
self._final_estimator.iterative_fit(X, y, n_iter=n_iter, **fit_params)
def estimator_supports_iterative_fit(self):
return self._final_estimator.estimator_supports_iterative_fit()
def get_max_iter(self):
if self.estimator_supports_iterative_fit():
return self._final_estimator.get_max_iter()
else:
raise NotImplementedError()
def configuration_fully_fitted(self):
return self._final_estimator.configuration_fully_fitted()
def get_current_iter(self):
return self._final_estimator.get_current_iter()
def predict(self, X, batch_size=None):
"""Predict the classes using the selected model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
batch_size: int or None, defaults to None
batch_size controls whether the pipeline will be
called on small chunks of the data. Useful when calling the
predict method on the whole array X results in a MemoryError.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Returns the predicted values"""
if batch_size is None:
return super().predict(X).astype(self._output_dtype)
else:
if not isinstance(batch_size, int):
raise ValueError(
"Argument 'batch_size' must be of type int, "
"but is '%s'" % type(batch_size)
)
if batch_size <= 0:
raise ValueError(
"Argument 'batch_size' must be positive, " "but is %d" % batch_size
)
else:
if self.num_targets == 1:
y = np.zeros((X.shape[0],), dtype=self._output_dtype)
else:
y = np.zeros(
(X.shape[0], self.num_targets), dtype=self._output_dtype
)
# Copied and adapted from the scikit-learn GP code
for k in range(max(1, int(np.ceil(float(X.shape[0]) / batch_size)))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size, X.shape[0]])
y[batch_from:batch_to] = self.predict(
X[batch_from:batch_to], batch_size=None
)
return y
def set_hyperparameters(
self,
configuration,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
init_params=None,
):
self.config = configuration
for node_idx, n_ in enumerate(self.steps):
node_name, node = n_
sub_configuration_space = node.get_hyperparameter_search_space(
feat_type=feat_type, dataset_properties=self.dataset_properties
)
sub_config_dict = {}
for param in configuration:
if param.startswith("%s:" % node_name):
value = configuration[param]
new_name = param.replace("%s:" % node_name, "", 1)
sub_config_dict[new_name] = value
sub_configuration = Configuration(
sub_configuration_space, values=sub_config_dict
)
if init_params is not None:
sub_init_params_dict = {}
for param in init_params:
if param.startswith("%s:" % node_name):
value = init_params[param]
new_name = param.replace("%s:" % node_name, "", 1)
sub_init_params_dict[new_name] = value
else:
sub_init_params_dict = None
if isinstance(
node, (AutoSklearnChoice, AutoSklearnComponent, BasePipeline)
):
node.set_hyperparameters(
feat_type=feat_type,
configuration=sub_configuration,
init_params=sub_init_params_dict,
)
else:
raise NotImplementedError("Not supported yet!")
# In-code check to make sure init params
# is checked after pipeline creation
self._check_init_params_honored(init_params)
return self
def get_hyperparameter_search_space(
self, feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
"""Return the configuration space for the CASH problem.
Returns
-------
cs : ConfigSpace.configuration_space.Configuration
The configuration space describing the AutoSklearnClassifier.
"""
if not hasattr(self, "config_space") or self.config_space is None:
self.config_space = self._get_hyperparameter_search_space(
feat_type=feat_type,
include=self.include,
exclude=self.exclude,
dataset_properties=self.dataset_properties,
)
return self.config_space
def _get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
include=None,
exclude=None,
dataset_properties=None,
):
"""Return the configuration space for the CASH problem.
This method should be called by the method
get_hyperparameter_search_space of a subclass. After the subclass
assembles a list of available estimators and preprocessor components,
_get_hyperparameter_search_space can be called to do the work of
creating the actual
ConfigSpace.configuration_space.ConfigurationSpace object.
Parameters
----------
feat_type: dict
python dictionary which maps the columns of the dataset to the data types
estimator_name : str
Name of the estimator hyperparameter which will be used in the
configuration space. For a classification task, this would be
'classifier'.
estimator_components : dict {name: component}
Dictionary with all estimator components to be included in the
configuration space.
preprocessor_components : dict {name: component}
Dictionary with all preprocessor components to be included in the
configuration space. .
always_active : list of str
A list of components which will always be active in the pipeline.
This is useful for components like imputation which have
hyperparameters to be configured, but which do not have any parent.
default_estimator : str
Default value for the estimator hyperparameter.
Returns
-------
cs : ConfigSpace.configuration_space.Configuration
The configuration space describing the AutoSklearnClassifier.
"""
raise NotImplementedError()
def _get_base_search_space(
self,
cs,
dataset_properties,
include,
exclude,
pipeline,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
):
if include is None:
if self.include is None:
include = {}
else:
include = self.include
keys = [pair[0] for pair in pipeline]
for key in include:
if key not in keys:
raise ValueError(
"Invalid key in include: %s; should be one " "of %s" % (key, keys)
)
if exclude is None:
if self.exclude is None:
exclude = {}
else:
exclude = self.exclude
keys = [pair[0] for pair in pipeline]
for key in exclude:
if key not in keys:
raise ValueError(
"Invalid key in exclude: %s; should be one " "of %s" % (key, keys)
)
if "sparse" not in dataset_properties:
# This dataset is probably dense
dataset_properties["sparse"] = False
if "signed" not in dataset_properties:
# This dataset probably contains unsigned data
dataset_properties["signed"] = False
matches = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=pipeline,
dataset_properties=dataset_properties,
include=include,
exclude=exclude,
)
# Now we have only legal combinations at this step of the pipeline
# Simple sanity checks
assert np.sum(matches) != 0, "No valid pipeline found."
assert np.sum(matches) <= np.size(
matches
), "'matches' is not binary; %s <= %d, %s" % (
str(np.sum(matches)),
np.size(matches),
str(matches.shape),
)
# Iterate each dimension of the matches array (each step of the
# pipeline) to see if we can add a hyperparameter for that step
for node_idx, n_ in enumerate(pipeline):
node_name, node = n_
is_choice = isinstance(node, AutoSklearnChoice)
# if the node isn't a choice we can add it immediately because it
# must be active (if it wasn't, np.sum(matches) would be zero
if not is_choice:
cs.add_configuration_space(
node_name,
node.get_hyperparameter_search_space(
dataset_properties=dataset_properties, feat_type=feat_type
),
)
# If the node is a choice, we have to figure out which of its
# choices are actually legal choices
else:
choices_list = (
autosklearn.pipeline.create_searchspace_util.find_active_choices(
matches,
node,
node_idx,
dataset_properties,
include.get(node_name),
exclude.get(node_name),
)
)
sub_config_space = node.get_hyperparameter_search_space(
feat_type=feat_type,
dataset_properties=dataset_properties,
include=choices_list,
)
cs.add_configuration_space(node_name, sub_config_space)
# And now add forbidden parameter configurations
# According to matches
if np.sum(matches) < np.size(matches):
cs = autosklearn.pipeline.create_searchspace_util.add_forbidden(
conf_space=cs,
pipeline=pipeline,
matches=matches,
dataset_properties=dataset_properties,
include=include,
exclude=exclude,
)
return cs
def _check_init_params_honored(self, init_params):
"""
Makes sure that init params is honored at the implementation level
"""
if init_params is None or len(init_params) < 1:
# None/empty dict, so no further check required
return
# There is the scenario, where instance is passed as an argument to the
# init_params 'instance': '{"task_id": "73543c4a360aa24498c0967fbc2f926b"}'}
# coming from smac instance. Remove this key to make the testing stricter
init_params.pop("instance", None)
for key, value in init_params.items():
if ":" not in key:
raise ValueError(
"Unsupported argument to init_params {}."
"When using init_params, a hierarchical format like "
"node_name:parameter must be provided.".format(key)
)
node_name = key.split(":", 1)[0]
if node_name not in self.named_steps.keys():
raise ValueError(
"The current node name specified via key={} of init_params "
"is not valid. Valid node names are {}".format(
key, self.named_steps.keys()
)
)
continue
variable_name = key.split(":")[-1]
node = self.named_steps[node_name]
if isinstance(node, BasePipeline):
# If dealing with a sub pipe,
# Call the child _check_init_params_honored with the updated config
node._check_init_params_honored(
{key.replace("%s:" % node_name, "", 1): value}
)
continue
if isinstance(node, AutoSklearnComponent):
node_dict = vars(node)
elif isinstance(node, AutoSklearnChoice):
node_dict = vars(node.choice)
else:
raise ValueError("Unsupported node type {}".format(type(node)))
if variable_name not in node_dict or node_dict[variable_name] != value:
raise ValueError(
"Cannot properly set the pair {}->{} via init_params"
"".format(key, value)
)
def __repr__(self):
class_name = self.__class__.__name__
configuration = {}
self.config._populate_values()
for hp_name in self.config:
if self.config[hp_name] is not None:
configuration[hp_name] = self.config[hp_name]
configuration_string = "".join(
[
"configuration={\n ",
",\n ".join(
[
"'%s': %s" % (hp_name, repr(configuration[hp_name]))
for hp_name in sorted(configuration)
]
),
"}",
]
)
if len(self.dataset_properties) > 0:
dataset_properties_string = []
dataset_properties_string.append("dataset_properties={")
for i, item in enumerate(self.dataset_properties.items()):
if i != 0:
dataset_properties_string.append(",\n ")
else:
dataset_properties_string.append("\n ")
if isinstance(item[1], str):
dataset_properties_string.append("'%s': '%s'" % (item[0], item[1]))
else:
dataset_properties_string.append("'%s': %s" % (item[0], item[1]))
dataset_properties_string.append("}")
dataset_properties_string = "".join(dataset_properties_string)
return_value = "%s(%s,\n%s)" % (
class_name,
configuration,
dataset_properties_string,
)
else:
return_value = "%s(%s)" % (class_name, configuration_string)
return return_value
def _get_pipeline_steps(
self, dataset_properties, feat_type: Optional[FEAT_TYPE_TYPE] = None
):
raise NotImplementedError()
def _get_estimator_hyperparameter_name(self):
raise NotImplementedError()
def get_additional_run_info(self):
"""Allows retrieving additional run information from the pipeline.
Can be overridden by subclasses to return additional information to
the optimization algorithm.
"""
return self._additional_run_info
def _validate_include_exclude_params(self):
if self.include is not None and self.exclude is not None:
for key in self.include.keys():
if key in self.exclude.keys():
raise ValueError(
"Cannot specify include and exclude for same step '{}'.".format(
key
)
)
supported_steps = {
step[0]: step[1]
for step in self.steps
if isinstance(step[1], AutoSklearnChoice)
}
for arg in ["include", "exclude"]:
argument = getattr(self, arg)
if not argument:
continue
for key in list(argument.keys()):
if key not in supported_steps:
raise ValueError(
"The provided key '{}' in the '{}' argument is not valid. The"
" only supported keys for this task are {}".format(
key, arg, list(supported_steps.keys())
)
)
candidate_components = argument[key]
if not (
isinstance(candidate_components, list) and candidate_components
):
raise ValueError(
"The provided value of the key '{}' in the '{}' argument is "
"not valid. The value must be a non-empty list.".format(
key, arg
)
)
available_components = list(
supported_steps[key]
.get_available_components(
dataset_properties=self.dataset_properties
)
.keys()
)
for component in candidate_components:
if component not in available_components:
raise ValueError(
"The provided component '{}' for the key '{}' in the '{}'"
" argument is not valid. The supported components for the"
" step '{}' for this task are {}".format(
component, key, arg, key, available_components
)
)
|
BasePipeline
|
python
|
huggingface__transformers
|
src/transformers/models/dia/modeling_dia.py
|
{
"start": 4626,
"end": 5345
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
DiaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
DiaRMSNorm
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_crossing01.py
|
{
"start": 315,
"end": 1441
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_crossing01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [43812352, 43814272]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"crossing": "max"})
chart.set_x_axis({"position": "t"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
huggingface__transformers
|
src/transformers/models/canine/modeling_canine.py
|
{
"start": 29076,
"end": 29798
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
CaninePredictionHeadTransform
|
python
|
doocs__leetcode
|
solution/1000-1099/1095.Find in Mountain Array/Solution.py
|
{
"start": 216,
"end": 972
}
|
class ____:
def findInMountainArray(self, target: int, mountain_arr: 'MountainArray') -> int:
def search(l: int, r: int, k: int) -> int:
while l < r:
mid = (l + r) >> 1
if k * mountain_arr.get(mid) >= k * target:
r = mid
else:
l = mid + 1
return -1 if mountain_arr.get(l) != target else l
n = mountain_arr.length()
l, r = 0, n - 1
while l < r:
mid = (l + r) >> 1
if mountain_arr.get(mid) > mountain_arr.get(mid + 1):
r = mid
else:
l = mid + 1
ans = search(0, l, 1)
return search(l + 1, n - 1, -1) if ans == -1 else ans
|
Solution
|
python
|
lepture__mistune
|
src/mistune/markdown.py
|
{
"start": 231,
"end": 4019
}
|
class ____:
"""Markdown instance to convert markdown text into HTML or other formats.
Here is an example with the HTMLRenderer::
from mistune import HTMLRenderer
md = Markdown(renderer=HTMLRenderer(escape=False))
md('hello **world**')
:param renderer: a renderer to convert parsed tokens
:param block: block level syntax parser
:param inline: inline level syntax parser
:param plugins: mistune plugins to use
"""
def __init__(
self,
renderer: Optional[BaseRenderer] = None,
block: Optional[BlockParser] = None,
inline: Optional[InlineParser] = None,
plugins: Optional[Iterable[Plugin]] = None,
):
if block is None:
block = BlockParser()
if inline is None:
inline = InlineParser()
self.renderer = renderer
self.block: BlockParser = block
self.inline: InlineParser = inline
self.before_parse_hooks: List[Callable[["Markdown", BlockState], None]] = []
self.before_render_hooks: List[Callable[["Markdown", BlockState], Any]] = []
self.after_render_hooks: List[
Callable[["Markdown", Union[str, List[Dict[str, Any]]], BlockState], Union[str, List[Dict[str, Any]]]]
] = []
if plugins:
for plugin in plugins:
plugin(self)
def use(self, plugin: Plugin) -> None:
plugin(self)
def render_state(self, state: BlockState) -> Union[str, List[Dict[str, Any]]]:
data = self._iter_render(state.tokens, state)
if self.renderer:
return self.renderer(data, state)
return list(data)
def _iter_render(self, tokens: Iterable[Dict[str, Any]], state: BlockState) -> Iterable[Dict[str, Any]]:
for tok in tokens:
if "children" in tok:
children = self._iter_render(tok["children"], state)
tok["children"] = list(children)
elif "text" in tok:
text = tok.pop("text")
# process inline text
# avoid striping emsp or other unicode spaces
tok["children"] = self.inline(text.strip(" \r\n\t\f"), state.env)
yield tok
def parse(self, s: str, state: Optional[BlockState] = None) -> Tuple[Union[str, List[Dict[str, Any]]], BlockState]:
"""Parse and convert the given markdown string. If renderer is None,
the returned **result** will be parsed markdown tokens.
:param s: markdown string
:param state: instance of BlockState
:returns: result, state
"""
if state is None:
state = self.block.state_cls()
# normalize line separator
s = s.replace("\r\n", "\n")
s = s.replace("\r", "\n")
if not s.endswith("\n"):
s += "\n"
state.process(s)
for hook in self.before_parse_hooks:
hook(self, state)
self.block.parse(state)
for hook2 in self.before_render_hooks:
hook2(self, state)
result = self.render_state(state)
for hook3 in self.after_render_hooks:
result = hook3(self, result, state)
return result, state
def read(
self, filepath: str, encoding: str = "utf-8", state: Optional[BlockState] = None
) -> Tuple[Union[str, List[Dict[str, Any]]], BlockState]:
if state is None:
state = self.block.state_cls()
state.env["__file__"] = filepath
with open(filepath, "rb") as f:
s = f.read()
s2 = s.decode(encoding)
return self.parse(s2, state)
def __call__(self, s: str) -> Union[str, List[Dict[str, Any]]]:
if s is None:
s = "\n"
return self.parse(s)[0]
|
Markdown
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_ingress_class_spec.py
|
{
"start": 383,
"end": 5087
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'controller': 'str',
'parameters': 'V1IngressClassParametersReference'
}
attribute_map = {
'controller': 'controller',
'parameters': 'parameters'
}
def __init__(self, controller=None, parameters=None, local_vars_configuration=None): # noqa: E501
"""V1IngressClassSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._controller = None
self._parameters = None
self.discriminator = None
if controller is not None:
self.controller = controller
if parameters is not None:
self.parameters = parameters
@property
def controller(self):
"""Gets the controller of this V1IngressClassSpec. # noqa: E501
controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:return: The controller of this V1IngressClassSpec. # noqa: E501
:rtype: str
"""
return self._controller
@controller.setter
def controller(self, controller):
"""Sets the controller of this V1IngressClassSpec.
controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:param controller: The controller of this V1IngressClassSpec. # noqa: E501
:type: str
"""
self._controller = controller
@property
def parameters(self):
"""Gets the parameters of this V1IngressClassSpec. # noqa: E501
:return: The parameters of this V1IngressClassSpec. # noqa: E501
:rtype: V1IngressClassParametersReference
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1IngressClassSpec.
:param parameters: The parameters of this V1IngressClassSpec. # noqa: E501
:type: V1IngressClassParametersReference
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressClassSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressClassSpec):
return True
return self.to_dict() != other.to_dict()
|
V1IngressClassSpec
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 74392,
"end": 75221
}
|
class ____(TestCase):
"""Tests for divide()"""
def test_invalid_n(self):
self.assertRaises(ValueError, lambda: mi.divide(-1, [1, 2, 3]))
self.assertRaises(ValueError, lambda: mi.divide(0, [1, 2, 3]))
def test_basic(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n, expected in [
(1, [iterable]),
(2, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]),
(3, [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]),
(10, [[n] for n in range(1, 10 + 1)]),
]:
self.assertEqual(
[list(x) for x in mi.divide(n, iterable)], expected
)
def test_large_n(self):
self.assertEqual(
[list(x) for x in mi.divide(6, iter(range(1, 4 + 1)))],
[[1], [2], [3], [4], [], []],
)
|
DivideTest
|
python
|
huggingface__transformers
|
tests/models/whisper/test_tokenization_whisper.py
|
{
"start": 1089,
"end": 14932
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "openai/whisper-tiny"
tokenizer_class = WhisperTokenizer
rust_tokenizer_class = WhisperTokenizer
test_rust_tokenizer = True # We only have one tokenizer now
test_slow_tokenizer = False # No slow tokenizer
test_sentencepiece = False
test_seq2seq = False
@classmethod
def setUpClass(cls):
super().setUpClass()
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny")
tokenizer.pad_token_id = 50256
tokenizer.pad_token = "<|endoftext|>"
cls.tokenizers = [tokenizer]
tokenizer.save_pretrained(cls.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "Where"
token_id = 14436
# Test the public API instead of private methods
tokenizer = self.get_tokenizer()
self.assertEqual(tokenizer.convert_tokens_to_ids(token), token_id)
self.assertEqual(tokenizer.convert_ids_to_tokens(token_id), token)
def test_full_tokenizer(self):
tokenizer = WhisperTokenizer.from_pretrained(self.tmpdirname)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["This", "Ġis", "Ġa", "Ġtest"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[5723, 307, 257, 1500],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġthis", "Ġis", "Ġfals", "é", "."], # fmt: skip
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 341, 307, 16720, 526, 13])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġthis", "Ġis", "Ġfals", "é", "."], # fmt: skip
)
@unittest.skip
def test_tokenizer_slow_store_full_signature(self):
pass
@unittest.skip
def test_tokenizer_fast_store_full_signature(self):
pass
@unittest.skip
def test_special_tokens_initialization(self):
# Whisper relies on specific additional special tokens, so we skip this
# general test. In particular, this test loads fast tokenizer from slow
# tokenizer, and the conversion uses prefix_tokens, where we reference
# additional special tokens by specific indices, hence overriding the
# list with less tokens leads to out of index error
pass
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding, model_name="openai/whisper-tiny.en", padding=False
)
def test_output_offsets(self):
tokenizer = self.get_tokenizer()
previous_sequence = [51492, 406, 3163, 1953, 466, 13, 51612, 51612]
self.assertEqual(
tokenizer.decode(previous_sequence, output_offsets=True),
{
"text": " not worth thinking about.",
"offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}],
},
)
# Merge when the previous sequence is a suffix of the next sequence
next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] # fmt: skip
self.assertEqual(
tokenizer.decode(next_sequences_1, output_offsets=True),
{
"text": (
" of spectators, retrievality is not worth thinking about. His instant panic was followed by a"
" small, sharp blow high on his chest.<|endoftext|>"
),
"offsets": [
{"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (5.0, 9.4),
},
],
},
)
def test_find_longest_common_subsequence(self):
previous_sequence = [1, 2, 3]
next_sequence = [2, 3, 4, 5]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5])
# Now previous is larger than next.
# We merge what we can and remove the extra right side of the left sequence
previous_sequence = [1, 2, 3, 4, 5, 6, 7]
next_sequence = [2, 3, 4, 5]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5])
# Nothing in common
previous_sequence = [1, 2, 3]
next_sequence = [4, 5, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6])
# Some errors in the overlap.
# We take from previous on the left, from the next on the right of the overlap
previous_sequence = [1, 2, 3, 4, 99]
next_sequence = [2, 98, 4, 5, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6])
# We take from previous on the left, from the next on the right of the overlap
previous_sequence = [1, 2, 99, 4, 5]
next_sequence = [2, 3, 4, 98, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 99, 4, 98, 6])
# This works on 3 sequences
seq1 = [1, 2, 3]
seq2 = [2, 3, 4]
seq3 = [3, 4, 5]
merge = _find_longest_common_sequence([seq1, seq2, seq3])
self.assertEqual(merge, [1, 2, 3, 4, 5])
# This works on 3 sequences with errors
seq1 = [1, 2, 3, 98, 5]
seq2 = [2, 99, 4, 5, 6, 7]
seq3 = [4, 97, 6, 7, 8]
merge = _find_longest_common_sequence([seq1, seq2, seq3])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6, 7, 8])
def test_skip_special_tokens_skips_prompt_ids(self):
tokenizer = self.get_tokenizer()
# fmt: off
encoded_input = [
50361, 2221, 13, 2326, 388, 391, 50258, 50259, 50359,
50363, 1282, 264, 2674, 9156, 295, 1523, 11, 2221, 13,
2326, 388, 391, 13657, 365, 2681, 21296, 17711, 13, 50257,
]
# fmt: on
expected_with_special_tokens = "<|startofprev|> Mr. Quilter<|startoftranscript|><|en|><|transcribe|><|notimestamps|> On the general principles of art, Mr. Quilter writes with equal lucidity.<|endoftext|>"
expected_without_special_tokens = " On the general principles of art, Mr. Quilter writes with equal lucidity."
self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens)
self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens)
def test_skip_special_tokens_with_timestamps(self):
tokenizer = self.get_tokenizer()
# fmt: off
encoded_input = [
50258, 50363, 50364, 634, 575, 12525, 22618, 1968, 6144,
35617, 20084, 1756, 311, 589, 307, 534, 10281, 934,
439, 293, 50676, 50676, 393, 4411, 294, 309, 457,
707, 295, 33301, 286, 392, 6628, 13, 50836, 50257,
]
# fmt: on
expected_with_special_tokens = "<|startoftranscript|><|notimestamps|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|><|endoftext|>"
expected_without_special_tokens = "<|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|>"
self.assertEqual(
tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=False),
expected_with_special_tokens,
)
self.assertEqual(
tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=True),
expected_without_special_tokens,
)
def test_fast_tokenizer_get_prompt_ids(self):
tokenizer = self.get_tokenizer()
prompt = "This is test prompt text."
tokenizer_prompt_ids = tokenizer.get_prompt_ids(prompt)
# Just check that we can get prompt ids
self.assertIsNotNone(tokenizer_prompt_ids)
def test_tokenizer_decode_prompt(self):
prompt_text = "What does the fox say?"
input_text = "Hatee hatee hatee ho"
tokenizer = self.get_tokenizer()
# encode prompt and input text using tokenizer
prompt_ids = tokenizer.get_prompt_ids(prompt_text, return_tensors="np")
input_ids = tokenizer(input_text, return_tensors="np").input_ids[0]
input_ids = np.hstack([prompt_ids, input_ids])
# check with prompt in output
pred_text = tokenizer.decode(input_ids, skip_special_tokens=False)
# check correctness
expected_text = f"<|startofprev|> {prompt_text}<|startoftranscript|><|notimestamps|>{input_text}<|endoftext|>"
self.assertEqual(pred_text.strip(), expected_text)
# check stripping prompt from output
pred_text = tokenizer.decode(input_ids, skip_special_tokens=True)
self.assertEqual(pred_text.strip(), input_text)
def test_combine_tokens_into_words(self):
tokenizer = self.get_tokenizer()
# 'whatever "whatever" said someone, clever!?'
encoded_input = [1363, 7969, 503, 1363, 7969, 1, 848, 1580, 11, 13494, 7323]
expected_words = ["whatever", ' "whatever"', " said", " someone,", " clever!?"]
expected_tokens = [[1363, 7969], [503, 1363, 7969, 1], [848], [1580, 11], [13494, 7323]]
expected_indices = [[0, 1], [2, 3, 4, 5], [6], [7, 8], [9, 10]]
output = _combine_tokens_into_words(tokenizer, encoded_input)
self.assertEqual(expected_words, output[0])
self.assertEqual(expected_tokens, output[1])
self.assertEqual(expected_indices, output[2])
def test_basic_normalizer(self):
tokenizer = self.get_tokenizer()
input_str = "Hola güey!"
expected_output_normalize = "hola güey "
expected_output_diacritics = "hola guey "
# tokenizer tests
encoded_input = tokenizer(input_str).input_ids
decoded_output = tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=False)
self.assertEqual(decoded_output, input_str)
decoded_output_normalize = tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=True)
self.assertEqual(decoded_output_normalize, expected_output_normalize)
decoded_output_diacritics = tokenizer.decode(
encoded_input, skip_special_tokens=True, basic_normalize=True, remove_diacritics=True
)
self.assertEqual(decoded_output_diacritics, expected_output_diacritics)
def test_decode_asr_with_word_level_timestamps(self):
# fmt: off
model_outputs = [
{
'stride': [10, 0, 5],
'tokens': np.array([[50363, 3363, 11, 345, 460, 0, 50423]]),
'token_timestamps': np.array([[0.0, 0.5, 0.52, 0.78, 1.2, 1.28, 1.28]])
}
]
# fmt: on
tokenizer = WhisperTokenizer.from_pretrained("onnx-community/whisper-tiny.en_timestamped")
result = tokenizer._decode_asr(
model_outputs, return_timestamps="word", return_language=False, time_precision=0.02
)
EXPECTED_OUTPUT = (
" Yes, you can!",
{
"chunks": [
{"text": " Yes,", "timestamp": (0.0, 0.52)},
{"text": " you", "timestamp": (0.52, 0.78)},
{"text": " can!", "timestamp": (0.78, 1.28)},
]
},
)
self.assertEqual(result, EXPECTED_OUTPUT)
|
WhisperTokenizerTest
|
python
|
wandb__wandb
|
wandb/_pydantic/base.py
|
{
"start": 1754,
"end": 4495
}
|
class ____(CompatBaseModel, ABC):
# Base class with sensible defaults for converting to and from JSON.
#
# Automatically parse or serialize "raw" API data (e.g. convert to and from
# camelCase keys):
# - `.model_{dump,dump_json}()` should return JSON-ready dicts or JSON
# strings.
# - `.model_{validate,validate_json}()` should accept JSON-ready dicts or
# JSON strings.
#
# Ensure round-trip serialization <-> deserialization between:
# - `model_dump()` <-> `model_validate()`
# - `model_dump_json()` <-> `model_validate_json()`
#
# These behaviors help models predictably handle GraphQL request or response
# data.
model_config = ConfigDict(
# ---------------------------------------------------------------------------
# Discouraged in v2.11+, deprecated in v3. Kept here for compatibility.
populate_by_name=True,
# ---------------------------------------------------------------------------
# Introduced in v2.11, ignored in earlier versions
validate_by_name=True,
validate_by_alias=True,
serialize_by_alias=True,
# ---------------------------------------------------------------------------
validate_assignment=True,
use_attribute_docstrings=True,
from_attributes=True,
)
# Custom default kwargs for `JsonableModel.model_{dump,dump_json}`:
# - by_alias: Convert keys to JSON-ready names and objects to JSON-ready
# dicts.
# - round_trip: Ensure the result can round-trip.
__DUMP_DEFAULTS: ClassVar[Dict[str, Any]] = dict(by_alias=True, round_trip=True)
@overload # Actual signature
def model_dump(
self, *, mode: str, **kwargs: Unpack[ModelDumpKwargs]
) -> dict[str, Any]: ...
@overload # In case pydantic adds more kwargs in future releases
def model_dump(self, **kwargs: Any) -> dict[str, Any]: ...
@override
def model_dump(self, *, mode: str = "json", **kwargs: Any) -> dict[str, Any]:
kwargs = {**self.__DUMP_DEFAULTS, **kwargs} # allows overrides, if needed
return super().model_dump(mode=mode, **kwargs)
@overload # Actual signature
def model_dump_json(
self, *, indent: int | None, **kwargs: Unpack[ModelDumpKwargs]
) -> str: ...
@overload # In case pydantic adds more kwargs in future releases
def model_dump_json(self, **kwargs: Any) -> str: ...
@override
def model_dump_json(self, *, indent: int | None = None, **kwargs: Any) -> str:
kwargs = {**self.__DUMP_DEFAULTS, **kwargs} # allows overrides, if needed
return super().model_dump_json(indent=indent, **kwargs)
# Base class for all GraphQL-derived types.
|
JsonableModel
|
python
|
scipy__scipy
|
scipy/signal/tests/test_ltisys.py
|
{
"start": 31674,
"end": 32581
}
|
class ____:
def test_initialization(self):
# Check that all initializations work
TransferFunction(1, 1)
TransferFunction([1], [2])
TransferFunction(np.array([1]), np.array([2]))
def test_conversion(self):
# Check the conversion functions
s = TransferFunction([1, 0], [1, -1])
assert isinstance(s.to_ss(), StateSpace)
assert isinstance(s.to_tf(), TransferFunction)
assert isinstance(s.to_zpk(), ZerosPolesGain)
# Make sure copies work
assert TransferFunction(s) is not s
assert s.to_tf() is not s
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_ss() and to_zpk()
# Getters
s = TransferFunction([1, 0], [1, -1])
xp_assert_equal(s.poles, [1.])
xp_assert_equal(s.zeros, [0.])
|
TestTransferFunction
|
python
|
sympy__sympy
|
sympy/codegen/ast.py
|
{
"start": 36530,
"end": 36772
}
|
class ____(_SizedIntType):
""" Represents an unsigned integer type. """
__slots__ = ()
@property
def min(self):
return 0
@property
def max(self):
return 2**self.nbits - 1
two = Integer(2)
|
UnsignedIntType
|
python
|
numba__numba
|
numba/cuda/cudadrv/driver.py
|
{
"start": 56018,
"end": 57281
}
|
class ____(object):
"""Implementation of GPU IPC using CUDA driver API.
This requires the devices to be peer accessible.
"""
def __init__(self, parent):
self.base = parent.base
self.handle = parent.handle
self.size = parent.size
self.offset = parent.offset
# remember if the handle is already opened
self._opened_mem = None
def open(self, context):
"""
Import the IPC memory and returns a raw CUDA memory pointer object
"""
if self.base is not None:
raise ValueError('opening IpcHandle from original process')
if self._opened_mem is not None:
raise ValueError('IpcHandle is already opened')
mem = context.open_ipc_handle(self.handle, self.offset + self.size)
# this object owns the opened allocation
# note: it is required the memory be freed after the ipc handle is
# closed by the importing context.
self._opened_mem = mem
return mem.own().view(self.offset)
def close(self):
if self._opened_mem is None:
raise ValueError('IpcHandle not opened')
driver.cuIpcCloseMemHandle(self._opened_mem.handle)
self._opened_mem = None
|
_CudaIpcImpl
|
python
|
django-haystack__django-haystack
|
test_haystack/test_fields.py
|
{
"start": 13984,
"end": 15205
}
|
class ____(TestCase):
def test_init(self):
try:
foo = DateTimeField(model_attr="foo")
except:
self.fail()
def test_convert(self):
pub_date = DateTimeField()
self.assertEqual(
pub_date.convert("2016-02-16T10:02:03"),
datetime.datetime(2016, 2, 16, 10, 2, 3),
)
def test_prepare(self):
mock = MockModel()
mock.pub_date = datetime.datetime(2009, 2, 13, 10, 1, 0)
pub_date = DateTimeField(model_attr="pub_date")
self.assertEqual(
pub_date.prepare(mock), datetime.datetime(2009, 2, 13, 10, 1, 0)
)
# Simulate default=datetime.datetime(2009, 2, 13, 10, 01, 00).
mock = MockModel()
default = DateTimeField(default=datetime.datetime(2000, 1, 1, 0, 0, 0))
self.assertEqual(default.prepare(mock), datetime.datetime(2000, 1, 1, 0, 0, 0))
def test_prepare_from_string(self):
mock = MockModel()
mock.pub_date = "2016-02-16T10:01:02Z"
pub_date = DateTimeField(model_attr="pub_date")
self.assertEqual(
pub_date.prepare(mock), datetime.datetime(2016, 2, 16, 10, 1, 2)
)
|
DateTimeFieldTestCase
|
python
|
getsentry__sentry
|
tests/sentry/integrations/models/deletions/test_organizationintegration.py
|
{
"start": 1192,
"end": 7772
}
|
class ____(TransactionTestCase, HybridCloudTestMixin):
    """Scheduled-deletion tests for OrganizationIntegration.

    Verifies that running the control-silo scheduled-deletion task removes
    the OrganizationIntegration row and correctly cascades — or deliberately
    does not cascade — to related records: external issues, identities,
    repositories, code owners / code mappings, and workflow actions.
    """

    def test_simple(self) -> None:
        # Deleting the org-integration must NOT delete region-silo
        # ExternalIssue rows (cross-silo cascade handled elsewhere).
        org = self.create_organization()
        integration, organization_integration = self.create_provider_integration_for(
            org, self.user, provider="example", name="Example"
        )

        with assume_test_silo_mode(SiloMode.REGION):
            external_issue = ExternalIssue.objects.create(
                organization_id=org.id, integration_id=integration.id, key="ABC-123"
            )

        organization_integration.update(status=ObjectStatus.PENDING_DELETION)
        ScheduledDeletion.schedule(instance=organization_integration, days=0)

        with self.tasks(), outbox_runner():
            run_scheduled_deletions_control()

        assert not OrganizationIntegration.objects.filter(id=organization_integration.id).exists()
        with assume_test_silo_mode(SiloMode.REGION):
            # TODO: When external issue -> organization is a hybrid cloud foreign key, test this is deleted via that route.
            assert ExternalIssue.objects.filter(id=external_issue.id).exists()

    def test_skip_on_undelete(self) -> None:
        # A scheduled deletion is skipped when the row is no longer marked
        # PENDING_DELETION (status was never set here).
        org = self.create_organization()
        integration = self.create_provider_integration(provider="example", name="Example")
        organization_integration = integration.add_organization(org, self.user)
        assert organization_integration is not None

        ScheduledDeletion.schedule(instance=organization_integration, days=0)
        with self.tasks():
            run_scheduled_deletions_control()

        assert OrganizationIntegration.objects.filter(id=organization_integration.id).exists()

    def test_repository_and_identity(self) -> None:
        # Cascade expectations: the Identity is deleted, the Repository is
        # detached (integration_id nulled) but kept, and the Integration
        # itself survives.
        org = self.create_organization()
        project = self.create_project(organization=org)
        integration = self.create_provider_integration(provider="example", name="Example")
        provider = self.create_identity_provider(integration)
        identity = self.create_identity(
            user=self.user, identity_provider=provider, external_id="abc123"
        )
        organization_integration = integration.add_organization(org, self.user, identity.id)
        assert organization_integration is not None
        repository = self.create_repo(
            project=project, name="testrepo", provider="gitlab", integration_id=integration.id
        )
        with assume_test_silo_mode(SiloMode.REGION):
            external_issue = ExternalIssue.objects.create(
                organization_id=org.id, integration_id=integration.id, key="ABC-123"
            )
        organization_integration.update(status=ObjectStatus.PENDING_DELETION)
        ScheduledDeletion.schedule(instance=organization_integration, days=0)

        with self.tasks():
            run_scheduled_deletions_control()

        assert Integration.objects.filter(id=integration.id).exists()
        assert not OrganizationIntegration.objects.filter(id=organization_integration.id).exists()
        assert not Identity.objects.filter(id=identity.id).exists()

        with assume_test_silo_mode(SiloMode.REGION):
            assert Project.objects.filter(id=project.id).exists()
            # TODO: When external issue -> organization is a hybrid cloud foreign key, test this is deleted via that route.
            assert ExternalIssue.objects.filter(id=external_issue.id).exists()
            repo = Repository.objects.get(id=repository.id)
            assert repo.integration_id is None

    def test_codeowner_links(self) -> None:
        # Code owners and their code mappings must be removed along with
        # the organization integration.
        org = self.create_organization()
        project = self.create_project(organization=org)
        integration = self.create_provider_integration(provider="example", name="Example")
        repository = self.create_repo(
            project=project, name="testrepo", provider="gitlab", integration_id=integration.id
        )
        organization_integration = integration.add_organization(org, self.user)
        assert organization_integration is not None

        code_mapping = self.create_code_mapping(
            project=project, repo=repository, organization_integration=organization_integration
        )
        code_owner = self.create_codeowners(project=project, code_mapping=code_mapping)

        organization_integration.update(status=ObjectStatus.PENDING_DELETION)
        ScheduledDeletion.schedule(instance=organization_integration, days=0)

        with self.tasks():
            run_scheduled_deletions_control()

        assert not OrganizationIntegration.objects.filter(id=organization_integration.id).exists()
        with assume_test_silo_mode(SiloMode.REGION):
            # We expect to delete all associated Code Owners and Code Mappings
            assert not ProjectCodeOwners.objects.filter(id=code_owner.id).exists()
            assert not RepositoryProjectPathConfig.objects.filter(id=code_owner.id).exists()

    @with_feature("organizations:update-action-status")
    def test_actions_disabled_on_integration_delete(self) -> None:
        """Test that actions are actually disabled when organization integration is deleted."""
        org = self.create_organization()
        integration, organization_integration = self.create_provider_integration_for(
            org, self.user, provider="slack", name="Test Integration"
        )

        # Create a data condition group
        condition_group = self.create_data_condition_group(organization=org)

        # Create an action linked to this integration
        action = self.create_action(
            type=Action.Type.SLACK,
            integration_id=integration.id,
            config={
                "target_type": ActionTarget.SPECIFIC,
                "target_identifier": "123",
                "target_display": "Test Integration",
            },
        )

        # Link action to condition group
        self.create_data_condition_group_action(condition_group=condition_group, action=action)

        organization_integration.update(status=ObjectStatus.PENDING_DELETION)
        ScheduledDeletion.schedule(instance=organization_integration, days=0)

        with self.tasks(), outbox_runner():
            run_scheduled_deletions_control()

        # Verify organization integration is deleted
        assert not OrganizationIntegration.objects.filter(id=organization_integration.id).exists()

        with assume_test_silo_mode(SiloMode.REGION):
            # Verify the action is disabled (not deleted) after the
            # integration is removed.
            action = Action.objects.get(id=action.id)
            assert action.status == ObjectStatus.DISABLED
|
DeleteOrganizationIntegrationTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.