language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 100279,
"end": 106598
} | class ____(Response):
"""
Response of models.get_by_task_id endpoint.
:param model: Model info
:type model: Model
"""
_service = "models"
_action = "get_by_task_id"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
},
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"stats": {
"description": "Model statistics",
"properties": {
"labels_count": {
"description": "Number of the model labels",
"type": "integer",
}
},
"type": ["object", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"model": {
"description": "Model info",
"oneOf": [{"$ref": "#/definitions/model"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, model: Any = None, **kwargs: Any) -> None:
super(GetByTaskIdResponse, self).__init__(**kwargs)
self.model = model
@schema_property("model")
def model(self) -> Any:
return self._property_model
@model.setter
def model(self, value: Any) -> None:
if value is None:
self._property_model = None
return
if isinstance(value, dict):
value = Model.from_dict(value)
else:
self.assert_isinstance(value, "model", Model)
self._property_model = value
| GetByTaskIdResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 924,
"end": 2382
} | class ____(TestSuiteReportStream):
first_read_state = get_state_after_migration(
time_period=f"{TestSuiteReportStream.start_date}T00:00:00+00:00", account_id=TestSuiteReportStream.account_id
)
first_read_state_for_records_further_start_date = get_state_after_migration(
time_period=f"2024-05-06T01:00:00+00:00", account_id=TestSuiteReportStream.account_id
)
second_read_state = SECOND_STATE
second_read_state_for_records_before_start_date = get_state_after_migration(
time_period=f"{TestSuiteReportStream.start_date}T00:00:00+00:00", account_id=TestSuiteReportStream.account_id
)
second_read_state_for_records_further_start_date = get_state_after_migration(
time_period="2024-05-07T01:00:00+00:00", account_id=TestSuiteReportStream.account_id
)
@property
def report_file_with_records_further_start_date(self):
return f"{self.stream_name}_with_records_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
@property
def incremental_report_file_with_records_further_cursor(self):
return f"{self.stream_name}_incremental_with_records_further_cursor"
| HourlyReportsTestWithStateChangesAfterMigration |
python | django__django | tests/urlpatterns_reverse/middleware.py | {
"start": 748,
"end": 948
} | class ____(MiddlewareMixin):
def process_view(self, *args, **kwargs):
def stream():
yield reverse("inner")
return StreamingHttpResponse(stream())
| ReverseInnerInStreaming |
python | getsentry__sentry | tests/sentry/utils/test_http.py | {
"start": 1022,
"end": 2816
} | class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project()
def test_project_default(self) -> None:
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(self.project)
self.assertEqual(result, frozenset(["*"]))
def test_project(self) -> None:
self.project.update_option("sentry:origins", ["http://foo.example"])
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(self.project)
self.assertEqual(result, frozenset(["http://foo.example"]))
def test_project_and_setting(self) -> None:
self.project.update_option("sentry:origins", ["http://foo.example"])
with self.settings(SENTRY_ALLOW_ORIGIN="http://example.com"):
result = get_origins(self.project)
self.assertEqual(result, frozenset(["http://foo.example"]))
def test_setting_empty(self) -> None:
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(None)
self.assertEqual(result, frozenset(["*"]))
def test_setting_all(self) -> None:
with self.settings(SENTRY_ALLOW_ORIGIN="*"):
result = get_origins(None)
self.assertEqual(result, frozenset(["*"]))
def test_setting_uri(self) -> None:
with self.settings(SENTRY_ALLOW_ORIGIN="http://example.com"):
result = get_origins(None)
self.assertEqual(result, frozenset(["http://example.com"]))
def test_empty_origin_values(self) -> None:
self.project.update_option("sentry:origins", ["*", None, ""])
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(self.project)
self.assertEqual(result, frozenset(["*"]))
| GetOriginsTestCase |
python | django__django | django/db/models/options.py | {
"start": 2498,
"end": 39672
} | class ____:
FORWARD_PROPERTIES = {
"fields",
"many_to_many",
"concrete_fields",
"local_concrete_fields",
"_non_pk_concrete_field_names",
"_reverse_one_to_one_field_names",
"_forward_fields_map",
"managers",
"managers_map",
"base_manager",
"default_manager",
"db_returning_fields",
"_property_names",
"pk_fields",
"total_unique_constraints",
"all_parents",
"swapped",
"verbose_name_raw",
}
REVERSE_PROPERTIES = {"related_objects", "fields_map", "_relation_tree"}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ""
self.db_table_comment = ""
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.select_on_save = False
self.default_permissions = ("add", "change", "delete", "view")
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = {}
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return "%s.%s" % (self.app_label, self.object_name)
@property
def label_lower(self):
return "%s.%s" % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith("_"):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
# App label/class name interpolation for names of constraints and
# indexes.
if not self.abstract:
self.constraints = self._format_names(self.constraints)
self.indexes = self._format_names(self.indexes)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
# order_with_respect_and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError(
"'class Meta' got invalid attribute(s): %s" % ",".join(meta_attrs)
)
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(
self.db_table, connection.ops.max_name_length()
)
if self.swappable:
setting_changed.connect(self.setting_changed)
def _format_names(self, objs):
"""App label/class name interpolation for object names."""
names = {"app_label": self.app_label.lower(), "class": self.model_name}
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name %= names
new_objs.append(obj)
return new_objs
def _get_default_pk_class(self):
pk_class_path = getattr(
self.app_config,
"default_auto_field",
settings.DEFAULT_AUTO_FIELD,
)
if self.app_config and self.app_config._is_default_auto_field_overridden:
app_config_class = type(self.app_config)
source = (
f"{app_config_class.__module__}."
f"{app_config_class.__qualname__}.default_auto_field"
)
else:
source = "DEFAULT_AUTO_FIELD"
if not pk_class_path:
raise ImproperlyConfigured(f"{source} must not be empty.")
try:
pk_class = import_string(pk_class_path)
except ImportError as e:
msg = (
f"{source} refers to the module '{pk_class_path}' that could "
f"not be imported."
)
raise ImproperlyConfigured(msg) from e
if not issubclass(pk_class, AutoField):
raise ValueError(
f"Primary key '{pk_class_path}' referred by {source} must "
f"subclass AutoField."
)
return pk_class
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f
for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist(
"%s has no field named '%s'" % (self.object_name, query)
)
self.ordering = ("_order",)
if not any(
isinstance(field, OrderWrt) for field in model._meta.local_fields
):
model.add_to_class("_order", OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [
fld for fld in self.local_fields if fld.name == field.name
]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
pk_class = self._get_default_pk_class()
auto = pk_class(verbose_name="ID", primary_key=True, auto_created=True)
model.add_to_class("id", auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
bisect.insort(self.local_many_to_many, field)
else:
bisect.insort(self.local_fields, field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However,
# related_model is a cached property, and all the models haven't been
# loaded yet, so we need to make sure we don't cache a string
# reference.
if (
field.is_relation
and hasattr(field.remote_field, "model")
and field.remote_field.model
):
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return "<Options for %s>" % self.object_name
def __str__(self):
return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(
getattr(connection.features, feat, False)
for feat in self.required_db_features
)
return True
@cached_property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
if isinstance(self.verbose_name, str):
return self.verbose_name
with override(None):
return str(self.verbose_name)
@cached_property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split(".")
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in
# get_user_model or as part of validation.
return swapped_for
if (
"%s.%s" % (swapped_label, swapped_object.lower())
!= self.label_lower
):
return swapped_for
return None
def setting_changed(self, *, setting, **kwargs):
if setting == self.swappable and "swapped" in self.__dict__:
del self.swapped
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, "_meta"))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, "_meta"):
if parent._base_manager.name != "_base_manager":
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r"
% (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = "_base_manager"
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, "_meta"):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r"
% (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third filter is a longwinded way of checking f.related_model - we
# don't use that property directly because related_model is a cached
# property, and all the models may not have been loaded yet; we don't
# want to cache the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation
and f.many_to_one
and not (hasattr(f.remote_field, "model") and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(
f
for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f)
and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f)
),
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(
f
for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many
),
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(
forward=False, reverse=True, include_hidden=True
)
return make_immutable_fields_list(
"related_objects",
(
obj
for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many
),
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist(
"%s has no field named '%s'" % (self.object_name, field_name)
)
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
@cached_property
def all_parents(self):
"""
Return all the ancestors of this model as a tuple ordered by MRO.
Useful for determining if something is an ancestor, regardless of
lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.all_parents:
result.add(ancestor)
return tuple(result)
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Backward compatibility method.
"""
return list(self.all_parents)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In case of a proxied model, the first link
# of the chain to the ancestor is that parent
# links
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(
PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
)
)
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.reverse_path_infos)
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and
then is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f
for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
remote_label = f.remote_field.model._meta.concrete_model._meta.label
related_objects_graph[remote_label].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[
model._meta.concrete_model._meta.label
]
model._meta.__dict__["_relation_tree"] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get("_relation_tree", EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
include_parents=include_parents, include_hidden=include_hidden
)
def _get_fields(
self,
forward=True,
reverse=True,
include_parents=True,
include_hidden=False,
topmost_call=True,
):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError(
"Invalid argument for include_parents: %s" % (include_parents,)
)
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
# In diamond inheritance it is possible that we see the same model
# from two different routes. In that case, avoid adding fields from
# the same parent again.
parent_fields = set()
for parent in self.parents:
if (
parent._meta.concrete_model != self.concrete_model
and include_parents == PROXY_PARENTS
):
continue
for obj in parent._meta._get_fields(
forward=forward,
reverse=reverse,
include_parents=include_parents,
include_hidden=include_hidden,
topmost_call=False,
):
if (
not getattr(obj, "parent_link", False)
or obj.model == self.concrete_model
) and obj not in parent_fields:
fields.append(obj)
parent_fields.add(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def total_unique_constraints(self):
"""
Return a list of total unique constraints. Useful for determining set
of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if (
isinstance(constraint, UniqueConstraint)
and constraint.condition is None
and not constraint.contains_expressions
)
]
@cached_property
def pk_fields(self):
return composite.unnest([self.pk])
@property
def is_composite_pk(self):
return isinstance(self.pk, CompositePrimaryKey)
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = set()
seen = set()
for klass in self.model.__mro__:
names |= {
name
for name, value in klass.__dict__.items()
if isinstance(value, property) and name not in seen
}
seen |= set(klass.__dict__)
return frozenset(names)
@cached_property
def _non_pk_concrete_field_names(self):
"""
Return a set of the non-pk concrete field names defined on the model.
"""
names = []
all_pk_fields = set(self.pk_fields)
for parent in self.all_parents:
all_pk_fields.update(parent._meta.pk_fields)
for field in self.concrete_fields:
if field not in all_pk_fields:
names.append(field.name)
if field.name != field.attname:
names.append(field.attname)
return frozenset(names)
@cached_property
def _reverse_one_to_one_field_names(self):
"""
Return a set of reverse one to one field names pointing to the current
model.
"""
return frozenset(
field.name for field in self.related_objects if field.one_to_one
)
@cached_property
def db_returning_fields(self):
"""
Private API intended only to be used by Django itself.
Fields to be returned after a database insert.
"""
return [
field
for field in self._get_fields(
forward=True, reverse=False, include_parents=PROXY_PARENTS
)
if getattr(field, "db_returning", False)
]
| Options |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 137499,
"end": 137916
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SecurityVulnerabilityOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| SecurityVulnerabilityOrder |
python | uqfoundation__dill | dill/tests/test_recursive.py | {
"start": 1028,
"end": 1636
} | class ____(object):
super_ = super
def __init__(self):
obj3.super_(obj3, self).__init__()
def test_super():
assert copy(obj1(), byref=True)
assert copy(obj1(), byref=True, recurse=True)
assert copy(obj1(), recurse=True)
assert copy(obj1())
assert copy(obj2(), byref=True)
assert copy(obj2(), byref=True, recurse=True)
assert copy(obj2(), recurse=True)
assert copy(obj2())
assert copy(obj3(), byref=True)
assert copy(obj3(), byref=True, recurse=True)
assert copy(obj3(), recurse=True)
assert copy(obj3())
def get_trigger(model):
pass
| obj3 |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/base.py | {
"start": 1923,
"end": 2672
} | class ____(typing.TypedDict):
credentials: Optional[google.auth.credentials.Credentials] = None
project: Optional[str] = None
location: Optional[str] = None
def llm_retry_decorator(f: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(f)
def wrapper(self, *args: Any, **kwargs: Any) -> Any:
max_retries = getattr(self, "max_retries", 0)
if max_retries <= 0:
return f(self, *args, **kwargs)
retry = create_retry_decorator(
max_retries=max_retries,
random_exponential=True,
stop_after_delay_seconds=60,
min_seconds=1,
max_seconds=20,
)
return retry(f)(self, *args, **kwargs)
return wrapper
| VertexAIConfig |
python | getsentry__sentry | tests/sentry/api/test_paginator.py | {
"start": 20261,
"end": 20918
} | class ____(SimpleTestCase):
def test_simple(self) -> None:
def data_fn(offset=None, limit=None):
return [i for i in range(offset, limit)]
paginator = GenericOffsetPaginator(data_fn=data_fn)
result = paginator.get_result(5)
assert list(result) == [0, 1, 2, 3, 4]
assert result.prev == Cursor(0, 0, True, False)
assert result.next == Cursor(0, 5, False, True)
result2 = paginator.get_result(5, result.next)
assert list(result2) == [5]
assert result2.prev == Cursor(0, 0, True, True)
assert result2.next == Cursor(0, 10, False, False)
| GenericOffsetPaginatorTest |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 72303,
"end": 78749
} | class ____(DashboardComponent):
"""
A dynamic node-link diagram for the task graph on the scheduler
See also the GraphLayout diagnostic at
distributed/diagnostics/graph_layout.py
"""
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.layout = GraphLayout(scheduler)
scheduler.add_plugin(self.layout)
self.invisible_count = 0 # number of invisible nodes
self.node_source = ColumnDataSource(
{"x": [], "y": [], "name": [], "state": [], "visible": [], "key": []}
)
self.edge_source = ColumnDataSource({"x": [], "y": [], "visible": []})
filter = GroupFilter(column_name="visible", group="True")
node_view = CDSView(filter=filter)
edge_view = CDSView(filter=filter)
node_colors = factor_cmap(
"state",
factors=["waiting", "queued", "processing", "memory", "released", "erred"],
palette=["gray", "yellow", "green", "red", "blue", "black"],
)
self.root = figure(title="Task Graph", **kwargs)
self.subtitle = Title(text=" ", text_font_style="italic")
self.root.add_layout(self.subtitle, "above")
self.root.multi_line(
xs="x",
ys="y",
source=self.edge_source,
line_width=1,
view=edge_view,
color="black",
alpha=0.3,
)
rect = self.root.scatter(
x="x",
y="y",
size=10,
color=node_colors,
source=self.node_source,
view=node_view,
legend_field="state",
marker="square",
)
self.root.xgrid.grid_line_color = None
self.root.ygrid.grid_line_color = None
self.root.xaxis.visible = False
self.root.yaxis.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="<b>@name</b>: @state",
renderers=[rect],
)
tap = TapTool(callback=OpenURL(url="info/task/@key.html"), renderers=[rect])
rect.nonselection_glyph = None
self.root.add_tools(hover, tap)
self.max_items = config.get("distributed.dashboard.graph-max-items", 5000)
@without_property_validation
@log_errors
def update(self):
# If there are too many tasks in the scheduler we'll disable this
# compoonents to not overload scheduler or client. Once we drop
# below the threshold, the data is filled up again as usual
if len(self.scheduler.tasks) > self.max_items:
self.subtitle.text = "Scheduler has too many tasks to display."
for container in [self.node_source, self.edge_source]:
container.data = {col: [] for col in container.column_names}
else:
# occasionally reset the column data source to remove old nodes
if self.invisible_count > len(self.node_source.data["x"]) / 2:
self.layout.reset_index()
self.invisible_count = 0
update = True
else:
update = False
new, self.layout.new = self.layout.new, []
new_edges = self.layout.new_edges
self.layout.new_edges = []
self.add_new_nodes_edges(new, new_edges, update=update)
self.patch_updates()
if len(self.scheduler.tasks) == 0:
self.subtitle.text = "Scheduler is empty."
else:
self.subtitle.text = " "
@without_property_validation
def add_new_nodes_edges(self, new, new_edges, update=False):
if new or update:
node_key = []
node_x = []
node_y = []
node_state = []
node_name = []
edge_x = []
edge_y = []
x = self.layout.x
y = self.layout.y
tasks = self.scheduler.tasks
for key in new:
try:
task = tasks[key]
except KeyError:
continue
xx = x[key]
yy = y[key]
node_key.append(url_escape(str(key)))
node_x.append(xx)
node_y.append(yy)
node_state.append(task.state)
node_name.append(task.prefix.name)
for a, b in new_edges:
try:
edge_x.append([x[a], x[b]])
edge_y.append([y[a], y[b]])
except KeyError:
pass
node = {
"x": node_x,
"y": node_y,
"state": node_state,
"name": node_name,
"key": node_key,
"visible": ["True"] * len(node_x),
}
edge = {"x": edge_x, "y": edge_y, "visible": ["True"] * len(edge_x)}
if update or not len(self.node_source.data["x"]):
# see https://github.com/bokeh/bokeh/issues/7523
self.node_source.data.update(node)
self.edge_source.data.update(edge)
else:
self.node_source.stream(node)
self.edge_source.stream(edge)
@without_property_validation
def patch_updates(self):
"""
Small updates like color changes or lost nodes from task transitions
"""
n = len(self.node_source.data["x"])
m = len(self.edge_source.data["x"])
if self.layout.state_updates:
state_updates = self.layout.state_updates
self.layout.state_updates = []
updates = [(i, c) for i, c in state_updates if i < n]
self.node_source.patch({"state": updates})
if self.layout.visible_updates:
updates = self.layout.visible_updates
updates = [(i, c) for i, c in updates if i < n]
self.layout.visible_updates = []
self.node_source.patch({"visible": updates})
self.invisible_count += len(updates)
if self.layout.visible_edge_updates:
updates = self.layout.visible_edge_updates
updates = [(i, c) for i, c in updates if i < m]
self.layout.visible_edge_updates = []
self.edge_source.patch({"visible": updates})
def __del__(self):
self.scheduler.remove_plugin(name=self.layout.name)
| TaskGraph |
python | great-expectations__great_expectations | great_expectations/core/partitioners.py | {
"start": 2871,
"end": 3163
} | class ____(pydantic.BaseModel):
regex: re.Pattern
param_names: Tuple[()] = ()
sort_ascending: bool = True
FileNamePartitioner = Union[
FileNamePartitionerYearly,
FileNamePartitionerMonthly,
FileNamePartitionerDaily,
FileNamePartitionerPath,
]
| FileNamePartitionerPath |
python | prabhupant__python-ds | data_structures/graphs/min_number_of_operations.py | {
"start": 227,
"end": 1003
} | class ____:
def __init__(self, value, level):
self.value = value
self.level = level
def min_steps(x, y):
node_x = Node(x, 0)
visited = []
queue = []
queue.append(node_x)
while queue:
s = queue.pop(0)
if s.value == y:
return s.level
visited.append(s.value)
if s.value * 2 == y or s.value - 1 == y:
return s.level + 1
# If not visited already, add its children
if s.value * 2 not in visited:
new_node = Node(s.value * 2, s.level + 1)
queue.append(new_node)
if s.value - 1 not in visited:
new_node = Node(s.value - 1, s.level + 1)
queue.append(new_node)
x = 2
y = 5
print(min_steps(x, y)) | Node |
python | python__mypy | mypy/nodes.py | {
"start": 79916,
"end": 80393
} | class ____(Expression):
"""Represents a typing.assert_type(expr, type) call."""
__slots__ = ("expr", "type")
__match_args__ = ("expr", "type")
expr: Expression
type: mypy.types.Type
def __init__(self, expr: Expression, typ: mypy.types.Type) -> None:
super().__init__()
self.expr = expr
self.type = typ
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_assert_type_expr(self)
| AssertTypeExpr |
python | lxml__lxml | src/lxml/cssselect.py | {
"start": 1766,
"end": 3306
} | class ____(etree.XPath):
"""A CSS selector.
Usage::
>>> from lxml import etree, cssselect
>>> select = cssselect.CSSSelector("a tag > child")
>>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
>>> [ el.tag for el in select(root) ]
['child']
To use CSS namespaces, you need to pass a prefix-to-namespace
mapping as ``namespaces`` keyword argument::
>>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
>>> select_ns = cssselect.CSSSelector('root > rdf|Description',
... namespaces={'rdf': rdfns})
>>> rdf = etree.XML((
... '<root xmlns:rdf="%s">'
... '<rdf:Description>blah</rdf:Description>'
... '</root>') % rdfns)
>>> [(el.tag, el.text) for el in select_ns(rdf)]
[('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
"""
def __init__(self, css, namespaces=None, translator='xml'):
if translator == 'xml':
translator = LxmlTranslator()
elif translator == 'html':
translator = LxmlHTMLTranslator()
elif translator == 'xhtml':
translator = LxmlHTMLTranslator(xhtml=True)
path = translator.css_to_xpath(css)
super().__init__(path, namespaces=namespaces)
self.css = css
def __repr__(self):
return '<%s %x for %r>' % (
self.__class__.__name__,
abs(id(self)),
self.css)
| CSSSelector |
python | walkccc__LeetCode | solutions/2509. Cycle Length Queries in a Tree/2509.py | {
"start": 0,
"end": 353
} | class ____:
def cycleLengthQueries(self, n: int, queries: list[list[int]]) -> list[int]:
def getCycleLength(a: int, b: int):
cycleLength = 1
while a != b:
if a > b:
a //= 2
else:
b //= 2
cycleLength += 1
return cycleLength
return [getCycleLength(*query) for query in queries]
| Solution |
python | huggingface__transformers | tests/models/layoutlmv3/test_image_processing_layoutlmv3.py | {
"start": 2449,
"end": 14414
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
fast_image_processing_class = (
LayoutLMv3ImageProcessorFast if (is_torchvision_available() and is_pytesseract_available()) else None
)
def setUp(self):
super().setUp()
self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "apply_ocr"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_LayoutLMv3_integration_test(self):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
# with apply_OCR = True
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class()
image = ds[0]["image"].convert("RGB")
encoding = image_processor(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 5.3.0
expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
# We get different outputs on CircleCI and on Github runners since 2025/06/26. It might be different versions of some 3rd party libraries in these 2 environments.
expected_boxes_1 = [[[141, 57, 210, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [695, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
expected_boxes_2 = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, expected_words)
self.assertIn(encoding.boxes, [expected_boxes_1, expected_boxes_2])
# with apply_OCR = False
image_processor = image_processing_class(apply_ocr=False)
encoding = image_processor(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| LayoutLMv3ImageProcessingTest |
python | huggingface__transformers | tests/models/lilt/test_modeling_lilt.py | {
"start": 10353,
"end": 11184
} | class ____(unittest.TestCase):
def test_inference_no_head(self):
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
input_ids = torch.tensor([[1, 2]], device=torch_device)
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids, bbox=bbox)
expected_shape = torch.Size([1, 2, 768])
expected_slice = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
device=torch_device,
)
self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
torch.testing.assert_close(outputs.last_hidden_state[0, :, :3], expected_slice, rtol=1e-3, atol=1e-3)
| LiltModelIntegrationTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 787884,
"end": 788059
} | class ____(sgqlc.types.Type, Contribution):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
| JoinedGitHubContribution |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_pubsub.py | {
"start": 1292,
"end": 9198
} | class ____:
def _generate_messages(self, count):
return [
ReceivedMessage(
ack_id=f"{i}",
message={
"data": f"Message {i}".encode(),
"attributes": {"type": "generated message"},
},
)
for i in range(1, count + 1)
]
def _generate_dicts(self, count):
return [ReceivedMessage.to_dict(m) for m in self._generate_messages(count)]
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_poke_no_messages(self, mock_hook):
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
)
mock_hook.return_value.pull.return_value = []
assert operator.poke({}) is False
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_poke_with_ack_messages(self, mock_hook):
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
ack_messages=True,
)
generated_messages = self._generate_messages(5)
mock_hook.return_value.pull.return_value = generated_messages
assert operator.poke({}) is True
mock_hook.return_value.acknowledge.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages=generated_messages,
)
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_execute(self, mock_hook):
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0,
)
generated_messages = self._generate_messages(5)
generated_dicts = self._generate_dicts(5)
mock_hook.return_value.pull.return_value = generated_messages
response = operator.execute({})
mock_hook.return_value.pull.assert_called_once_with(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=True
)
assert generated_dicts == response
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_execute_timeout(self, mock_hook):
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0,
timeout=1,
)
mock_hook.return_value.pull.return_value = []
with pytest.raises(AirflowException):
operator.execute({})
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_execute_with_messages_callback(self, mock_hook):
generated_messages = self._generate_messages(5)
messages_callback_return_value = "asdfg"
def messages_callback(
pulled_messages: list[ReceivedMessage],
context: dict[str, Any],
):
assert pulled_messages == generated_messages
assert isinstance(context, dict)
for key in context.keys():
assert isinstance(key, str)
return messages_callback_return_value
messages_callback = mock.Mock(side_effect=messages_callback)
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0,
messages_callback=messages_callback,
)
mock_hook.return_value.pull.return_value = generated_messages
response = operator.execute({})
mock_hook.return_value.pull.assert_called_once_with(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=True
)
messages_callback.assert_called_once()
assert response == messages_callback_return_value
def test_pubsub_pull_sensor_async(self):
"""
Asserts that a task is deferred and a PubsubPullTrigger will be fired
when the PubSubPullSensor is executed.
"""
task = PubSubPullSensor(
task_id="test_task_id",
ack_messages=True,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
task.execute(context={})
assert isinstance(exc.value.trigger, PubsubPullTrigger), "Trigger is not a PubsubPullTrigger"
def test_pubsub_pull_sensor_async_execute_should_throw_exception(self):
"""Tests that an AirflowException is raised in case of error event"""
operator = PubSubPullSensor(
task_id="test_task",
ack_messages=True,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
deferrable=True,
)
with pytest.raises(AirflowException):
operator.execute_complete(
context=mock.MagicMock(), event={"status": "error", "message": "test failure message"}
)
def test_pubsub_pull_sensor_async_execute_complete(self):
"""Asserts that logging occurs as expected"""
operator = PubSubPullSensor(
task_id="test_task",
ack_messages=True,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
deferrable=True,
)
test_message = "test"
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context={}, event={"status": "success", "message": test_message})
mock_log_info.assert_called_with("Sensor pulls messages: %s", test_message)
@mock.patch("airflow.providers.google.cloud.sensors.pubsub.PubSubHook")
def test_pubsub_pull_sensor_async_execute_complete_use_message_callback(self, mock_hook):
test_message = [
{
"ack_id": "UAYWLF1GSFE3GQhoUQ5PXiM_NSAoRRIJB08CKF15MU0sQVhwaFENGXJ9YHxrUxsDV0ECel1RGQdoTm11H4GglfRLQ1RrWBIHB01Vel5TEwxoX11wBnm4vPO6v8vgfwk9OpX-8tltO6ywsP9GZiM9XhJLLD5-LzlFQV5AEkwkDERJUytDCypYEU4EISE-MD5FU0Q",
"message": {
"data": "aGkgZnJvbSBjbG91ZCBjb25zb2xlIQ==",
"message_id": "12165864188103151",
"publish_time": "2024-08-28T11:49:50.962Z",
"attributes": {},
"ordering_key": "",
},
"delivery_attempt": 0,
}
]
received_messages = [pubsub_v1.types.ReceivedMessage(msg) for msg in test_message]
messages_callback_return_value = "custom_message_from_callback"
def messages_callback(
pulled_messages: list[ReceivedMessage],
context: dict[str, Any],
):
assert pulled_messages == received_messages
assert isinstance(context, dict)
for key in context.keys():
assert isinstance(key, str)
return messages_callback_return_value
operator = PubSubPullSensor(
task_id="test_task",
ack_messages=True,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
deferrable=True,
messages_callback=messages_callback,
)
mock_hook.return_value.pull.return_value = received_messages
with mock.patch.object(operator.log, "info") as mock_log_info:
resp = operator.execute_complete(context={}, event={"status": "success", "message": test_message})
mock_log_info.assert_called_with("Sensor pulls messages: %s", test_message)
assert resp == messages_callback_return_value
| TestPubSubPullSensor |
python | docker__docker-py | docker/errors.py | {
"start": 5024,
"end": 5169
} | class ____(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return (self.msg)
| ContextException |
python | facebook__pyre-check | tools/generate_taint_models/get_models_filtered_by_callable.py | {
"start": 419,
"end": 1096
class ____(ModelGenerator[T]):
    """Decorates another model generator, keeping only the models that a
    caller-supplied predicate accepts."""

    def __init__(
        self, generator_to_filter: ModelGenerator[T], filter: Callable[[T], bool]
    ) -> None:
        self.generator_to_filter = generator_to_filter
        self.filter = filter

    def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
        # Discovery is delegated entirely to the wrapped generator.
        return self.generator_to_filter.gather_functions_to_model()

    def compute_models(
        self, functions_to_model: Iterable[Callable[..., object]]
    ) -> List[T]:
        # Run the wrapped generator, then drop every model the predicate rejects.
        accepted: List[T] = []
        for candidate in self.generator_to_filter.compute_models(functions_to_model):
            if self.filter(candidate):
                accepted.append(candidate)
        return accepted
| ModelsFilteredByCallableGenerator |
python | mamba-org__mamba | docs/source/tools/mermaid.py | {
"start": 1738,
"end": 12681
class ____(Directive):
    """
    Directive to insert arbitrary Mermaid markup.
    """

    # Directive contract: inline body OR one optional filename argument.
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {
        "alt": directives.unchanged,
        "align": align_spec,
        "caption": directives.unchanged,
    }

    def get_mm_code(self):
        """Return the mermaid source as a string, read either from the file
        argument or from the directive body; on error return a list holding a
        reporter warning node instead."""
        if self.arguments:
            # try to load mermaid code from an external file
            document = self.state.document
            if self.content:
                # File argument and inline body are mutually exclusive.
                return [
                    document.reporter.warning(
                        "Mermaid directive cannot have both content and a filename argument",
                        line=self.lineno,
                    )
                ]
            env = self.state.document.settings.env
            argument = search_image_for_language(self.arguments[0], env)
            rel_filename, filename = env.relfn2path(argument)
            # Rebuild this document when the external file changes.
            env.note_dependency(rel_filename)
            try:
                with codecs.open(filename, "r", "utf-8") as fp:
                    mmcode = fp.read()
            except OSError:  # noqa
                return [
                    document.reporter.warning(
                        "External Mermaid file %r not found or reading it failed" % filename,
                        line=self.lineno,
                    )
                ]
        else:
            # inline mermaid code
            mmcode = "\n".join(self.content)
            if not mmcode.strip():
                return [
                    self.state_machine.reporter.warning(
                        'Ignoring "mermaid" directive without content.',
                        line=self.lineno,
                    )
                ]
        return mmcode

    def run(self):
        """Build the mermaid doctree node, copying recognised options onto it
        and wrapping it in a figure when a caption is given."""
        node = mermaid()
        node["code"] = self.get_mm_code()
        node["options"] = {}
        if "alt" in self.options:
            node["alt"] = self.options["alt"]
        if "align" in self.options:
            node["align"] = self.options["align"]
        # NOTE(review): "inline" is consulted here but is not declared in
        # option_spec above — presumably set programmatically; confirm.
        if "inline" in self.options:
            node["inline"] = True
        caption = self.options.get("caption")
        if caption:
            node = figure_wrapper(self, node, caption)
        return [node]
def render_mm(self, code, options, fmt, prefix="mermaid"):
    """Render mermaid code into a PNG or PDF output file.

    ``self`` is the Sphinx writer/translator whose builder carries the
    extension configuration.  Returns ``(relative_uri, output_path)``, or
    ``(None, None)`` when the mermaid command cannot be executed.
    Raises ``MermaidError`` when the command fails or produces no file.
    """
    # "raw" is rendered client-side elsewhere; for file output treat it as PNG.
    if fmt == "raw":
        fmt = "png"

    mermaid_cmd = self.builder.config.mermaid_cmd
    # Hash code + options + sequence config so an identical graph is only
    # rendered once per build output directory.
    hashkey = (code + str(options) + str(self.builder.config.mermaid_sequence_config)).encode(
        "utf-8"
    )

    basename = f"{prefix}-{sha1(hashkey).hexdigest()}"
    fname = f"{basename}.{fmt}"
    relfn = posixpath.join(self.builder.imgpath, fname)
    outdir = os.path.join(self.builder.outdir, self.builder.imagedir)
    outfn = os.path.join(outdir, fname)
    tmpfn = os.path.join(_get_default_tempdir(), basename)

    if os.path.isfile(outfn):
        # Already rendered earlier in this (or a previous) build.
        return relfn, outfn

    ensuredir(os.path.dirname(outfn))

    # mermaid expects UTF-8 by default
    if isinstance(code, str):
        code = code.encode("utf-8")

    with open(tmpfn, "wb") as t:
        t.write(code)

    mm_args = [mermaid_cmd, "-i", tmpfn, "-o", outfn]
    mm_args.extend(self.builder.config.mermaid_params)
    if self.builder.config.mermaid_sequence_config:
        # BUG FIX: ``list.extend`` accepts exactly one iterable argument; the
        # previous two-argument call raised TypeError whenever
        # mermaid_sequence_config was set.
        mm_args.extend(["--configFile", self.builder.config.mermaid_sequence_config])

    try:
        p = Popen(mm_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError as err:
        if err.errno != ENOENT:  # No such file or directory
            raise
        logger.warning(
            "command %r cannot be run (needed for mermaid "
            "output), check the mermaid_cmd setting" % mermaid_cmd
        )
        return None, None

    stdout, stderr = p.communicate(code)
    if self.builder.config.mermaid_verbose:
        logger.info(stdout)

    if p.returncode != 0:
        raise MermaidError(f"Mermaid exited with error:\n[stderr]\n{stderr}\n[stdout]\n{stdout}")
    if not os.path.isfile(outfn):
        raise MermaidError(
            "Mermaid did not produce an output file:\n[stderr]\n{}\n[stdout]\n{}".format(
                stderr, stdout
            )
        )
    return relfn, outfn
def _render_mm_html_raw(self, node, code, options, prefix="mermaid", imgcls=None, alt=None):
    """Emit the mermaid source verbatim inside a ``<div class="mermaid">`` so
    the browser-side mermaid.js (registered in config_inited) renders it."""
    if "align" in node:
        tag_template = """<div align="{align}" class="mermaid align-{align}">
{code}
</div>
"""
    else:
        tag_template = """<div class="mermaid">
{code}
</div>"""
    self.body.append(tag_template.format(align=node.get("align"), code=self.encode(code)))
    # Children must not be visited: this node is fully rendered.
    raise nodes.SkipNode
def render_mm_html(self, node, code, options, prefix="mermaid", imgcls=None, alt=None):
    """Emit HTML for a mermaid node according to mermaid_output_format:
    client-side "raw", or a server-rendered SVG/PNG file reference."""
    fmt = self.builder.config.mermaid_output_format
    if fmt == "raw":
        return _render_mm_html_raw(
            self, node, code, options, prefix="mermaid", imgcls=None, alt=None
        )

    try:
        if fmt not in ("png", "svg"):
            raise MermaidError(
                "mermaid_output_format must be one of 'raw', 'png', 'svg', but is %r" % fmt
            )
        fname, outfn = render_mm(self, code, options, fmt, prefix)
    except MermaidError as exc:
        logger.warning("mermaid code %r: " % code + str(exc))
        raise nodes.SkipNode

    if fname is None:
        # Rendering command unavailable: fall back to showing the source text.
        self.body.append(self.encode(code))
    else:
        if alt is None:
            alt = node.get("alt", self.encode(code).strip())
        imgcss = imgcls and 'class="%s"' % imgcls or ""
        if fmt == "svg":
            # <object> keeps SVG interactivity; the alt text is the fallback.
            svgtag = """<object data="{}" type="image/svg+xml">
<p class="warning">{}</p></object>\n""".format(
                fname,
                alt,
            )
            self.body.append(svgtag)
        else:
            if "align" in node:
                self.body.append(
                    '<div align="{}" class="align-{}">'.format(node["align"], node["align"])
                )
            self.body.append(f'<img src="{fname}" alt="{alt}" {imgcss}/>\n')
            if "align" in node:
                self.body.append("</div>\n")

    raise nodes.SkipNode


def html_visit_mermaid(self, node):
    # HTML translator entry point registered in setup().
    render_mm_html(self, node, node["code"], node["options"])
def render_mm_latex(self, node, code, options, prefix="mermaid"):
    """Render the graph to PDF (optionally cropped via mermaid_pdfcrop) and
    emit the corresponding ``\\sphinxincludegraphics`` LaTeX markup."""
    try:
        fname, outfn = render_mm(self, code, options, "pdf", prefix)
    except MermaidError as exc:
        logger.warning("mm code %r: " % code + str(exc))
        raise nodes.SkipNode

    if self.builder.config.mermaid_pdfcrop != "":
        # Optionally post-process the PDF with an external crop tool.
        mm_args = [self.builder.config.mermaid_pdfcrop, outfn]
        try:
            p = Popen(mm_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
        except OSError as err:
            if err.errno != ENOENT:  # No such file or directory
                raise
            logger.warning(
                "command %r cannot be run (needed to crop pdf), \
check the mermaid_cmd setting"
                % self.builder.config.mermaid_pdfcrop
            )
            return None, None
        stdout, stderr = p.communicate()
        if self.builder.config.mermaid_verbose:
            logger.info(stdout)

        if p.returncode != 0:
            raise MermaidError(
                f"PdfCrop exited with error:\n[stderr]\n{stderr}\n[stdout]\n{stdout}"
            )
        if not os.path.isfile(outfn):
            raise MermaidError(
                "PdfCrop did not produce an output file:\n[stderr]\n%s\n"
                "[stdout]\n%s" % (stderr, stdout)
            )
        # The crop tool writes "<name>-crop.pdf"; point at that file instead.
        fname = "{filename[0]}-crop{filename[1]}".format(filename=os.path.splitext(fname))

    is_inline = self.is_inline(node)
    if is_inline:
        para_separator = ""
    else:
        para_separator = "\n"

    if fname is not None:
        post = None
        # Block-level alignment is emulated with \hspace fills around the image.
        if not is_inline and "align" in node:
            if node["align"] == "left":
                self.body.append("{")
                post = "\\hspace*{\\fill}}"
            elif node["align"] == "right":
                self.body.append("{\\hspace*{\\fill}")
                post = "}"
        self.body.append(f"{para_separator}\\sphinxincludegraphics{{{fname}}}{para_separator}")
        if post:
            self.body.append(post)

    raise nodes.SkipNode


def latex_visit_mermaid(self, node):
    # LaTeX translator entry point registered in setup().
    render_mm_latex(self, node, node["code"], node["options"])
def render_mm_texinfo(self, node, code, options, prefix="mermaid"):
    """Render the graph to PNG and emit a Texinfo ``@image`` reference."""
    try:
        fname, outfn = render_mm(self, code, options, "png", prefix)
    except MermaidError as exc:
        logger.warning("mm code %r: " % code + str(exc))
        raise nodes.SkipNode
    if fname is not None:
        # @image takes the file name without extension; strip the ".png".
        self.body.append("@image{%s,,,[mermaid],png}\n" % fname[:-4])
    raise nodes.SkipNode


def texinfo_visit_mermaid(self, node):
    # Texinfo translator entry point registered in setup().
    render_mm_texinfo(self, node, node["code"], node["options"])
def text_visit_mermaid(self, node):
    """Plain-text builder fallback: emit a textual placeholder for the graph."""
    has_alt = "alt" in node.attributes
    placeholder = _("[graph: %s]") % node["alt"] if has_alt else _("[graph]")
    self.add_text(placeholder)
    raise nodes.SkipNode


def man_visit_mermaid(self, node):
    """Man-page builder fallback: emit a textual placeholder for the graph."""
    if "alt" not in node.attributes:
        self.body.append(_("[graph]"))
    else:
        self.body.append(_("[graph: %s]") % node["alt"])
    raise nodes.SkipNode
def config_inited(app, config):
    """Register the mermaid.js assets once the Sphinx configuration is ready.

    Adds the pinned mermaid.min.js from unpkg, an inline initialisation
    snippet, and the extension's stylesheet.
    """
    version = config.mermaid_version
    mermaid_js_url = f"https://unpkg.com/mermaid@{version}/dist/mermaid.min.js"
    app.add_js_file(mermaid_js_url)
    # BUG FIX: the snippet previously read ``securityLevel="loose"`` which is
    # a JavaScript syntax error inside an object literal (must be a colon),
    # so mermaid.initialize() never ran in the browser.
    app.add_js_file(
        None,
        body='mermaid.initialize({startOnLoad:true, theme:"neutral", '
        'securityLevel:"loose", sequenceConfig: {mirrorActors: false}});',
    )
    app.add_css_file("mermaid.css")
def on_build_finished(app: Sphinx, exc: Exception) -> None:
    """Copy the bundled mermaid.css into the output's _static dir on success."""
    if exc is not None:
        # Build failed: leave the output tree alone.
        return
    stylesheet = os.path.join(os.path.dirname(__file__), "mermaid.css")
    static_dir = os.path.join(app.outdir, "_static")
    copy_asset(stylesheet, static_dir)
def setup(app):
    """Sphinx extension entry point: register the node, directive, config
    values and event hooks for mermaid rendering."""
    # One visitor pair per builder family; depart is always None because every
    # visitor raises SkipNode after emitting its output.
    app.add_node(
        mermaid,
        html=(html_visit_mermaid, None),
        latex=(latex_visit_mermaid, None),
        texinfo=(texinfo_visit_mermaid, None),
        text=(text_visit_mermaid, None),
        man=(man_visit_mermaid, None),
    )
    app.add_directive("mermaid", Mermaid)
    #
    # Config values ("html" = rebuild HTML when they change).
    app.add_config_value("mermaid_cmd", "mmdc", "html")
    app.add_config_value("mermaid_pdfcrop", "", "html")
    app.add_config_value("mermaid_output_format", "raw", "html")
    app.add_config_value("mermaid_params", list(), "html")
    app.add_config_value("mermaid_verbose", False, "html")
    app.add_config_value("mermaid_sequence_config", False, "html")
    app.add_config_value("mermaid_version", "8.10.2", "html")
    app.connect("config-inited", config_inited)
    app.connect("build-finished", on_build_finished)
    return {"version": sphinx.__display_version__, "parallel_read_safe": True}
| Mermaid |
python | numba__numba | numba/core/interpreter.py | {
"start": 56303,
"end": 143235
} | class ____(object):
"""A bytecode interpreter that builds up the IR.
"""
_DEBUG_PRINT = False
def __init__(self, func_id):
self.func_id = func_id
if self._DEBUG_PRINT:
print(func_id.func)
self.arg_count = func_id.arg_count
self.arg_names = func_id.arg_names
self.loc = self.first_loc = ir.Loc.from_function_id(func_id)
self.is_generator = func_id.is_generator
# { inst offset : ir.Block }
self.blocks = {}
# { name: [definitions] } of local variables
self.definitions = collections.defaultdict(list)
# A set to keep track of all exception variables.
# To be used in _legalize_exception_vars()
self._exception_vars = set()
def interpret(self, bytecode):
"""
Generate IR for this bytecode.
"""
self.bytecode = bytecode
self.scopes = []
global_scope = ir.Scope(parent=None, loc=self.loc)
self.scopes.append(global_scope)
flow = Flow(bytecode)
flow.run()
self.dfa = AdaptDFA(flow)
self.cfa = AdaptCFA(flow)
if config.DUMP_CFG:
self.cfa.dump()
# Temp states during interpretation
self.current_block = None
self.current_block_offset = None
last_active_offset = 0
for _, inst_blocks in self.cfa.blocks.items():
if inst_blocks.body:
last_active_offset = max(last_active_offset,
max(inst_blocks.body))
self.last_active_offset = last_active_offset
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
self.active_exception_entries = tuple(
[entry for entry in self.bytecode.exception_entries
if entry.start < self.last_active_offset])
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
self.syntax_blocks = []
self.dfainfo = None
self.scopes.append(ir.Scope(parent=self.current_scope, loc=self.loc))
# Interpret loop
for inst, kws in self._iter_inst():
self._dispatch(inst, kws)
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
# Insert end of try markers
self._end_try_blocks()
elif PYVERSION in ((3, 10),):
pass
else:
raise NotImplementedError(PYVERSION)
self._legalize_exception_vars()
# Prepare FunctionIR
func_ir = ir.FunctionIR(self.blocks, self.is_generator, self.func_id,
self.first_loc, self.definitions,
self.arg_count, self.arg_names)
_logger.debug(_lazy_pformat(func_ir,
lazy_func=lambda x: x.dump_to_string()))
# post process the IR to rewrite opcodes/byte sequences that are too
# involved to risk handling as part of direct interpretation
peepholes = []
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
peepholes.append(peep_hole_split_at_pop_block)
if PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13), (3, 14)):
peepholes.append(peep_hole_list_to_tuple)
peepholes.append(peep_hole_delete_with_exit)
if PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13), (3, 14)):
# peep_hole_call_function_ex_to_call_function_kw
# depends on peep_hole_list_to_tuple converting
# any large number of arguments from a list to a
# tuple.
peepholes.append(peep_hole_call_function_ex_to_call_function_kw)
peepholes.append(peep_hole_fuse_dict_add_updates)
post_processed_ir = self.post_process(peepholes, func_ir)
return post_processed_ir
def post_process(self, peepholes, func_ir):
for peep in peepholes:
func_ir = peep(func_ir)
return func_ir
def _end_try_blocks(self):
"""Closes all try blocks by inserting the required marker at the
exception handler
This is only needed for py3.11 because of the changes in exception
handling. This merely maps the new py3.11 semantics back to the old way.
What the code does:
- For each block, compute the difference of blockstack to its incoming
blocks' blockstack.
- If the incoming blockstack has an extra TRY, the current block must
be the EXCEPT block and we need to insert a marker.
See also: _insert_try_block_end
"""
assert PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14))
graph = self.cfa.graph
for offset, block in self.blocks.items():
# Get current blockstack
cur_bs = self.dfa.infos[offset].blockstack
# Check blockstack of the incoming blocks
for inc, _ in graph.predecessors(offset):
inc_bs = self.dfa.infos[inc].blockstack
# find first diff in the blockstack
for i, (x, y) in enumerate(zip(cur_bs, inc_bs)):
if x != y:
break
else:
i = min(len(cur_bs), len(inc_bs))
def do_change(remain):
while remain:
ent = remain.pop()
if ent['kind'] == BlockKind('TRY'):
# Extend block with marker for end of try
self.current_block = block
oldbody = list(block.body)
block.body.clear()
self._insert_try_block_end()
block.body.extend(oldbody)
return True
if do_change(list(inc_bs[i:])):
break
def _legalize_exception_vars(self):
"""Search for unsupported use of exception variables.
Note, they cannot be stored into user variable.
"""
# Build a set of exception variables
excvars = self._exception_vars.copy()
# Propagate the exception variables to LHS of assignment
for varname, defnvars in self.definitions.items():
for v in defnvars:
if isinstance(v, ir.Var):
k = v.name
if k in excvars:
excvars.add(varname)
# Filter out the user variables.
uservar = list(filter(lambda x: not x.startswith('$'), excvars))
if uservar:
# Complain about the first user-variable storing an exception
first = uservar[0]
loc = self.current_scope.get(first).loc
msg = "Exception object cannot be stored into variable ({})."
raise errors.UnsupportedBytecodeError(msg.format(first), loc=loc)
def init_first_block(self):
# Define variables receiving the function arguments
for index, name in enumerate(self.arg_names):
val = ir.Arg(index=index, name=name, loc=self.loc)
self.store(val, name)
def _iter_inst(self):
for blkct, block in enumerate(self.cfa.iterliveblocks()):
firstinst = self.bytecode[block.offset]
# If its an END_FOR instruction, the start location of block
# is set to start of the FOR loop, so take the location of
# next instruction. This only affects the source location
# marking and has no impact to semantic.
if firstinst.opname == 'END_FOR':
firstinst = self.bytecode[firstinst.next]
self.loc = self.loc.with_lineno(firstinst.lineno)
self._start_new_block(block.offset)
if blkct == 0:
# Is first block
self.init_first_block()
for offset, kws in self.dfainfo.insts:
inst = self.bytecode[offset]
self.loc = self.loc.with_lineno(inst.lineno)
yield inst, kws
self._end_current_block()
def _start_new_block(self, offset):
oldblock = self.current_block
self.insert_block(offset)
tryblk = self.dfainfo.active_try_block if self.dfainfo else None
# Ensure the last block is terminated
if oldblock is not None and not oldblock.is_terminated:
# Handle ending try block.
# If there's an active try-block and the handler block is live.
if tryblk is not None and tryblk['end'] in self.cfa.graph.nodes():
# We are in a try-block, insert a branch to except-block.
# This logic cannot be in self._end_current_block()
# because we don't know the non-raising next block-offset.
branch = ir.Branch(
cond=self.get('$exception_check'),
truebr=tryblk['end'],
falsebr=offset,
loc=self.loc,
)
oldblock.append(branch)
# Handle normal case
else:
jmp = ir.Jump(offset, loc=self.loc)
oldblock.append(jmp)
# Get DFA block info
self.dfainfo = self.dfa.infos[self.current_block_offset]
self.assigner = Assigner()
# Check out-of-scope syntactic-block
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
# This is recreating pre-3.11 code structure
while self.syntax_blocks:
if offset >= self.syntax_blocks[-1].exit:
synblk = self.syntax_blocks.pop()
if isinstance(synblk, ir.With):
self.current_block.append(ir.PopBlock(self.loc))
else:
break
# inject try block:
newtryblk = self.dfainfo.active_try_block
if newtryblk is not None:
if newtryblk is not tryblk:
self._insert_try_block_begin()
elif PYVERSION in ((3, 10),):
while self.syntax_blocks:
if offset >= self.syntax_blocks[-1].exit:
self.syntax_blocks.pop()
else:
break
else:
raise NotImplementedError(PYVERSION)
def _end_current_block(self):
# Handle try block
if not self.current_block.is_terminated:
tryblk = self.dfainfo.active_try_block
if tryblk is not None:
self._insert_exception_check()
# Handle normal block cleanup
self._remove_unused_temporaries()
self._insert_outgoing_phis()
def _inject_call(self, func, gv_name, res_name=None):
"""A helper function to inject a call to *func* which is a python
function.
Parameters
----------
func : callable
The function object to be called.
gv_name : str
The variable name to be used to store the function object.
res_name : str; optional
The variable name to be used to store the call result.
If ``None``, a name is created automatically.
"""
gv_fn = ir.Global(gv_name, func, loc=self.loc)
self.store(value=gv_fn, name=gv_name, redefine=True)
callres = ir.Expr.call(self.get(gv_name), (), (), loc=self.loc)
res_name = res_name or '$callres_{}'.format(gv_name)
self.store(value=callres, name=res_name, redefine=True)
def _insert_try_block_begin(self):
"""Insert IR-nodes to mark the start of a `try` block.
"""
self._inject_call(eh.mark_try_block, 'mark_try_block')
def _insert_try_block_end(self):
"""Insert IR-nodes to mark the end of a `try` block.
"""
self._inject_call(eh.end_try_block, 'end_try_block')
def _insert_exception_variables(self):
"""Insert IR-nodes to initialize the exception variables.
"""
tryblk = self.dfainfo.active_try_block
# Get exception variables
endblk = tryblk['end']
edgepushed = self.dfainfo.outgoing_edgepushed.get(endblk)
# Note: the last value on the stack is the exception value
# Note: due to the current limitation, all exception variables are None
if edgepushed:
const_none = ir.Const(value=None, loc=self.loc)
# For each variable going to the handler block.
for var in edgepushed:
if var in self.definitions:
raise AssertionError(
"exception variable CANNOT be defined by other code",
)
self.store(value=const_none, name=var)
self._exception_vars.add(var)
def _insert_exception_check(self):
"""Called before the end of a block to inject checks if raised.
"""
self._insert_exception_variables()
# Do exception check
self._inject_call(eh.exception_check, 'exception_check',
'$exception_check')
def _remove_unused_temporaries(self):
"""
Remove assignments to unused temporary variables from the
current block.
"""
new_body = []
replaced_var = {}
for inst in self.current_block.body:
# the same temporary is assigned to multiple variables in cases
# like a = b[i] = 1, so need to handle replaced temporaries in
# later setitem/setattr nodes
if (isinstance(inst, (ir.SetItem, ir.SetAttr))
and inst.value.name in replaced_var):
inst.value = replaced_var[inst.value.name]
elif isinstance(inst, ir.Assign):
if (inst.target.is_temp
and inst.target.name in self.assigner.unused_dests):
continue
# the same temporary is assigned to multiple variables in cases
# like a = b = 1, so need to handle replaced temporaries in
# later assignments
if (isinstance(inst.value, ir.Var)
and inst.value.name in replaced_var):
inst.value = replaced_var[inst.value.name]
new_body.append(inst)
continue
# chained unpack cases may reuse temporary
# e.g. a = (b, c) = (x, y)
if (isinstance(inst.value, ir.Expr)
and inst.value.op == "exhaust_iter"
and inst.value.value.name in replaced_var):
inst.value.value = replaced_var[inst.value.value.name]
new_body.append(inst)
continue
# eliminate temporary variables that are assigned to user
# variables right after creation. E.g.:
# $1 = f(); a = $1 -> a = f()
# the temporary variable is not reused elsewhere since CPython
# bytecode is stack-based and this pattern corresponds to a pop
if (isinstance(inst.value, ir.Var) and inst.value.is_temp
and new_body and isinstance(new_body[-1], ir.Assign)):
prev_assign = new_body[-1]
# _var_used_in_binop check makes sure we don't create a new
# inplace binop operation which can fail
# (see TestFunctionType.test_in_iter_func_call)
if (prev_assign.target.name == inst.value.name
and not self._var_used_in_binop(
inst.target.name, prev_assign.value)):
replaced_var[inst.value.name] = inst.target
prev_assign.target = inst.target
# replace temp var definition in target with proper defs
self.definitions[inst.target.name].remove(inst.value)
self.definitions[inst.target.name].extend(
self.definitions.pop(inst.value.name)
)
continue
new_body.append(inst)
self.current_block.body = new_body
def _var_used_in_binop(self, varname, expr):
"""return True if 'expr' is a binary expression and 'varname' is used
in it as an argument
"""
return (isinstance(expr, ir.Expr)
and expr.op in ("binop", "inplace_binop")
and (varname == expr.lhs.name or varname == expr.rhs.name))
def _insert_outgoing_phis(self):
"""
Add assignments to forward requested outgoing values
to subsequent blocks.
"""
for phiname, varname in self.dfainfo.outgoing_phis.items():
target = self.current_scope.get_or_define(phiname,
loc=self.loc)
try:
val = self.get(varname)
except ir.NotDefinedError:
# Hack to make sure exception variables are defined
assert PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)), \
"unexpected missing definition"
val = ir.Const(value=None, loc=self.loc)
stmt = ir.Assign(value=val, target=target,
loc=self.loc)
self.definitions[target.name].append(stmt.value)
if not self.current_block.is_terminated:
self.current_block.append(stmt)
else:
self.current_block.insert_before_terminator(stmt)
def get_global_value(self, name):
"""
Get a global value from the func_global (first) or
as a builtins (second). If both failed, return a ir.UNDEFINED.
"""
try:
return self.func_id.func.__globals__[name]
except KeyError:
return getattr(builtins, name, ir.UNDEFINED)
def get_closure_value(self, index):
"""
Get a value from the cell contained in this function's closure.
If not set, return a ir.UNDEFINED.
"""
cell = self.func_id.func.__closure__[index]
try:
return cell.cell_contents
except ValueError:
return ir.UNDEFINED
@property
def current_scope(self):
return self.scopes[-1]
@property
def code_consts(self):
return self.bytecode.co_consts
@property
def code_locals(self):
return self.bytecode.co_varnames
@property
def code_names(self):
return self.bytecode.co_names
@property
def code_cellvars(self):
return self.bytecode.co_cellvars
@property
def code_freevars(self):
return self.bytecode.co_freevars
def _dispatch(self, inst, kws):
if self._DEBUG_PRINT:
print(inst)
assert self.current_block is not None
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
if self.syntax_blocks:
top = self.syntax_blocks[-1]
if isinstance(top, ir.With) :
if inst.offset >= top.exit:
self.current_block.append(ir.PopBlock(loc=self.loc))
self.syntax_blocks.pop()
elif PYVERSION in ((3, 10),):
pass
else:
raise NotImplementedError(PYVERSION)
fname = "op_%s" % inst.opname.replace('+', '_')
try:
fn = getattr(self, fname)
except AttributeError:
raise NotImplementedError(inst)
else:
try:
return fn(inst, **kws)
except errors.NotDefinedError as e:
if e.loc is None:
loc = self.loc
else:
loc = e.loc
err = errors.NotDefinedError(e.name, loc=loc)
if not config.FULL_TRACEBACKS:
raise err from None
else:
m = f"handling op: {inst} | offset: {inst.offset}"
err.add_context(m)
err.add_context(self.bytecode.dump())
raise err
# --- Scope operations ---
def store(self, value, name, redefine=False):
"""
Store *value* (a Expr or Var instance) into the variable named *name*
(a str object). Returns the target variable.
"""
if redefine or self.current_block_offset in self.cfa.backbone:
rename = not (name in self.code_cellvars)
target = self.current_scope.redefine(name, loc=self.loc,
rename=rename)
else:
target = self.current_scope.get_or_define(name, loc=self.loc)
if isinstance(value, ir.Var):
value = self.assigner.assign(value, target)
stmt = ir.Assign(value=value, target=target, loc=self.loc)
self.current_block.append(stmt)
self.definitions[target.name].append(value)
return target
def get(self, name):
"""
Get the variable (a Var instance) with the given *name*.
"""
# Implicit argument for comprehension starts with '.'
# See Parameter class in inspect.py (from Python source)
if name[0] == '.' and name[1:].isdigit():
name = 'implicit{}'.format(name[1:])
# Try to simplify the variable lookup by returning an earlier
# variable assigned to *name*.
var = self.assigner.get_assignment_source(name)
if var is None:
var = self.current_scope.get(name)
return var
# --- Block operations ---
def insert_block(self, offset, scope=None, loc=None):
scope = scope or self.current_scope
loc = loc or self.loc
blk = ir.Block(scope=scope, loc=loc)
self.blocks[offset] = blk
self.current_block = blk
self.current_block_offset = offset
return blk
# --- Bytecode handlers ---
def op_NOP(self, inst):
pass
if PYVERSION in ((3, 14), ):
# New in 3.14
op_NOT_TAKEN = op_NOP
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_RESUME(self, inst):
pass
def op_CACHE(self, inst):
pass
def op_PRECALL(self, inst):
pass
def op_PUSH_NULL(self, inst):
pass
def op_RETURN_GENERATOR(self, inst):
pass
def op_PRINT_ITEM(self, inst, item, printvar, res):
item = self.get(item)
printgv = ir.Global("print", print, loc=self.loc)
self.store(value=printgv, name=printvar)
call = ir.Expr.call(self.get(printvar), (item,), (), loc=self.loc)
self.store(value=call, name=res)
def op_PRINT_NEWLINE(self, inst, printvar, res):
printgv = ir.Global("print", print, loc=self.loc)
self.store(value=printgv, name=printvar)
call = ir.Expr.call(self.get(printvar), (), (), loc=self.loc)
self.store(value=call, name=res)
def op_UNPACK_SEQUENCE(self, inst, iterable, stores, tupleobj):
count = len(stores)
# Exhaust the iterable into a tuple-like object
tup = ir.Expr.exhaust_iter(value=self.get(iterable), loc=self.loc,
count=count)
self.store(name=tupleobj, value=tup)
# then index the tuple-like object to extract the values
for i, st in enumerate(stores):
expr = ir.Expr.static_getitem(self.get(tupleobj),
index=i, index_var=None,
loc=self.loc)
self.store(expr, st)
def op_FORMAT_SIMPLE(self, inst, value, res, strvar):
# Same as FORMAT_VALUE
return self.op_FORMAT_VALUE(inst, value, res, strvar)
def op_FORMAT_VALUE(self, inst, value, res, strvar):
"""
FORMAT_VALUE(flags): flags argument specifies format spec which is not
supported yet. Currently, str() is simply called on the value.
https://docs.python.org/3/library/dis.html#opcode-FORMAT_VALUE
"""
value = self.get(value)
strgv = ir.Global("str", str, loc=self.loc)
self.store(value=strgv, name=strvar)
call = ir.Expr.call(self.get(strvar), (value,), (), loc=self.loc)
self.store(value=call, name=res)
def op_BUILD_STRING(self, inst, strings, tmps):
"""
BUILD_STRING(count): Concatenates count strings.
Required for supporting f-strings.
https://docs.python.org/3/library/dis.html#opcode-BUILD_STRING
"""
count = inst.arg
# corner case: f""
if count == 0:
const = ir.Const("", loc=self.loc)
self.store(const, tmps[-1])
return
prev = self.get(strings[0])
for other, tmp in zip(strings[1:], tmps):
other = self.get(other)
expr = ir.Expr.binop(
operator.add, lhs=prev, rhs=other, loc=self.loc
)
self.store(expr, tmp)
prev = self.get(tmp)
def op_BUILD_SLICE(self, inst, start, stop, step, res, slicevar):
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
if step is None:
sliceinst = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
else:
step = self.get(step)
sliceinst = ir.Expr.call(self.get(slicevar), (start, stop, step),
(), loc=self.loc)
self.store(value=sliceinst, name=res)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_BINARY_SLICE(self, inst, start, end, container, res, slicevar,
temp_res):
start = self.get(start)
end = self.get(end)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
sliceinst = ir.Expr.call(self.get(slicevar), (start, end), (),
loc=self.loc)
self.store(value=sliceinst, name=temp_res)
index = self.get(temp_res)
target = self.get(container)
expr = ir.Expr.getitem(target, index=index, loc=self.loc)
self.store(expr, res)
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_STORE_SLICE(self, inst, start, end, container, value, res,
slicevar):
start = self.get(start)
end = self.get(end)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
sliceinst = ir.Expr.call(self.get(slicevar), (start, end), (),
loc=self.loc)
self.store(value=sliceinst, name=res)
index = self.get(res)
target = self.get(container)
value = self.get(value)
stmt = ir.SetItem(target=target, index=index, value=value,
loc=self.loc)
self.current_block.append(stmt)
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_SLICE_0(self, inst, base, res, slicevar, indexvar, nonevar):
base = self.get(base)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
index = ir.Expr.call(self.get(slicevar), (none, none), (), loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_1(self, inst, base, start, nonevar, res, slicevar, indexvar):
base = self.get(base)
start = self.get(start)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, none), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_2(self, inst, base, nonevar, stop, res, slicevar, indexvar):
base = self.get(base)
stop = self.get(stop)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (none, stop,), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_3(self, inst, base, start, stop, res, slicevar, indexvar):
base = self.get(base)
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_STORE_SLICE_0(self, inst, base, value, slicevar, indexvar, nonevar):
base = self.get(base)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
index = ir.Expr.call(self.get(slicevar), (none, none), (), loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
    def op_STORE_SLICE_1(self, inst, base, start, nonevar, value, slicevar,
                         indexvar):
        """Emit IR for ``base[start:] = value`` (legacy STORE_SLICE+1)."""
        base = self.get(base)
        start = self.get(start)
        # Upper bound is absent: materialise a None constant for it.
        nonegv = ir.Const(None, loc=self.loc)
        self.store(value=nonegv, name=nonevar)
        none = self.get(nonevar)
        slicegv = ir.Global("slice", slice, loc=self.loc)
        self.store(value=slicegv, name=slicevar)
        # Build slice(start, None) and use it as the assignment index.
        index = ir.Expr.call(self.get(slicevar), (start, none), (),
                             loc=self.loc)
        self.store(value=index, name=indexvar)
        stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
                          loc=self.loc)
        self.current_block.append(stmt)
    def op_STORE_SLICE_2(self, inst, base, nonevar, stop, value, slicevar,
                         indexvar):
        """Emit IR for ``base[:stop] = value`` (legacy STORE_SLICE+2)."""
        base = self.get(base)
        stop = self.get(stop)
        # Lower bound is absent: materialise a None constant for it.
        nonegv = ir.Const(None, loc=self.loc)
        self.store(value=nonegv, name=nonevar)
        none = self.get(nonevar)
        slicegv = ir.Global("slice", slice, loc=self.loc)
        self.store(value=slicegv, name=slicevar)
        # Build slice(None, stop) and use it as the assignment index.
        index = ir.Expr.call(self.get(slicevar), (none, stop,), (),
                             loc=self.loc)
        self.store(value=index, name=indexvar)
        stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
                          loc=self.loc)
        self.current_block.append(stmt)
def op_STORE_SLICE_3(self, inst, base, start, stop, value, slicevar,
indexvar):
base = self.get(base)
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
    def op_DELETE_SLICE_0(self, inst, base, slicevar, indexvar, nonevar):
        """Emit IR for ``del base[:]`` (legacy DELETE_SLICE+0)."""
        base = self.get(base)
        slicegv = ir.Global("slice", slice, loc=self.loc)
        self.store(value=slicegv, name=slicevar)
        # Both bounds absent: build slice(None, None).
        nonegv = ir.Const(None, loc=self.loc)
        self.store(value=nonegv, name=nonevar)
        none = self.get(nonevar)
        index = ir.Expr.call(self.get(slicevar), (none, none), (), loc=self.loc)
        self.store(value=index, name=indexvar)
        stmt = ir.DelItem(base, self.get(indexvar), loc=self.loc)
        self.current_block.append(stmt)
    def op_DELETE_SLICE_1(self, inst, base, start, nonevar, slicevar, indexvar):
        """Emit IR for ``del base[start:]`` (legacy DELETE_SLICE+1)."""
        base = self.get(base)
        start = self.get(start)
        # Upper bound is absent: materialise a None constant for it.
        nonegv = ir.Const(None, loc=self.loc)
        self.store(value=nonegv, name=nonevar)
        none = self.get(nonevar)
        slicegv = ir.Global("slice", slice, loc=self.loc)
        self.store(value=slicegv, name=slicevar)
        index = ir.Expr.call(self.get(slicevar), (start, none), (),
                             loc=self.loc)
        self.store(value=index, name=indexvar)
        stmt = ir.DelItem(base, self.get(indexvar), loc=self.loc)
        self.current_block.append(stmt)
    def op_DELETE_SLICE_2(self, inst, base, nonevar, stop, slicevar, indexvar):
        """Emit IR for ``del base[:stop]`` (legacy DELETE_SLICE+2)."""
        base = self.get(base)
        stop = self.get(stop)
        # Lower bound is absent: materialise a None constant for it.
        nonegv = ir.Const(None, loc=self.loc)
        self.store(value=nonegv, name=nonevar)
        none = self.get(nonevar)
        slicegv = ir.Global("slice", slice, loc=self.loc)
        self.store(value=slicegv, name=slicevar)
        index = ir.Expr.call(self.get(slicevar), (none, stop,), (),
                             loc=self.loc)
        self.store(value=index, name=indexvar)
        stmt = ir.DelItem(base, self.get(indexvar), loc=self.loc)
        self.current_block.append(stmt)
def op_DELETE_SLICE_3(self, inst, base, start, stop, slicevar, indexvar):
base = self.get(base)
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.DelItem(base, self.get(indexvar), loc=self.loc)
self.current_block.append(stmt)
def _op_LOAD_FAST(self, inst, res):
srcname = self.code_locals[inst.arg]
self.store(value=self.get(srcname), name=res)
    if PYVERSION in ((3, 13), (3, 14)):
        def op_LOAD_FAST(self, inst, res, as_load_deref=False):
            # 3.13+: the caller may ask for this LOAD_FAST to behave as a
            # LOAD_DEREF via *as_load_deref*.
            if as_load_deref:
                self.op_LOAD_DEREF(inst, res)
            else:
                self._op_LOAD_FAST(inst, res)
    else:
        # Older versions: LOAD_FAST is always a plain local load.
        op_LOAD_FAST = _op_LOAD_FAST
    if PYVERSION in ((3, 13), (3, 14)):
        # 3.13 introduced fused superinstructions; the oparg packs two
        # local-variable indices: high nibble first, low nibble second.
        def op_LOAD_FAST_LOAD_FAST(self, inst, res1, res2):
            oparg = inst.arg
            oparg1 = oparg >> 4
            oparg2 = oparg & 15
            src1 = self.get(self.code_locals[oparg1])
            src2 = self.get(self.code_locals[oparg2])
            self.store(value=src1, name=res1)
            self.store(value=src2, name=res2)
        def op_STORE_FAST_LOAD_FAST(self, inst, store_value, load_res):
            # Store into the first local, then load the second one.
            oparg = inst.arg
            oparg1 = oparg >> 4
            oparg2 = oparg & 15
            dstname = self.code_locals[oparg1]
            dst_value = self.get(store_value)
            self.store(value=dst_value, name=dstname)
            src_value = self.get(self.code_locals[oparg2])
            self.store(value=src_value, name=load_res)
        def op_STORE_FAST_STORE_FAST(self, inst, value1, value2):
            # Two consecutive stores into two packed local slots.
            oparg = inst.arg
            oparg1 = oparg >> 4
            oparg2 = oparg & 15
            dstname = self.code_locals[oparg1]
            self.store(value=self.get(value1), name=dstname)
            dstname = self.code_locals[oparg2]
            self.store(value=self.get(value2), name=dstname)
    elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    if PYVERSION in ((3, 12), (3, 13), (3, 14)):
        # LOAD_FAST_CHECK only differs from LOAD_FAST by a runtime
        # unbound-local check, which this IR builder does not model.
        op_LOAD_FAST_CHECK = op_LOAD_FAST
        def op_LOAD_FAST_AND_CLEAR(self, inst, res):
            try:
                # try the regular LOAD_FAST logic
                srcname = self.code_locals[inst.arg]
                self.store(value=self.get(srcname), name=res)
            except NotDefinedError:
                # If the variable is not in the scope, set it to `undef`
                undef = ir.Expr.undef(loc=self.loc)
                self.store(undef, name=res)
    elif PYVERSION in ((3, 10), (3, 11)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    if PYVERSION in ((3, 14),):
        # New in 3.14. The borrow variants only change reference-count
        # behaviour in CPython; for IR purposes they are plain loads.
        op_LOAD_FAST_BORROW = op_LOAD_FAST
        op_LOAD_FAST_BORROW_LOAD_FAST_BORROW = op_LOAD_FAST_LOAD_FAST
    elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
def op_STORE_FAST(self, inst, value):
dstname = self.code_locals[inst.arg]
value = self.get(value)
self.store(value=value, name=dstname)
def op_DELETE_FAST(self, inst):
dstname = self.code_locals[inst.arg]
self.current_block.append(ir.Del(dstname, loc=self.loc))
def op_DUP_TOPX(self, inst, orig, duped):
for src, dst in zip(orig, duped):
self.store(value=self.get(src), name=dst)
op_DUP_TOP = op_DUP_TOPX
op_DUP_TOP_TWO = op_DUP_TOPX
def op_STORE_ATTR(self, inst, target, value):
attr = self.code_names[inst.arg]
sa = ir.SetAttr(target=self.get(target), value=self.get(value),
attr=attr, loc=self.loc)
self.current_block.append(sa)
def op_DELETE_ATTR(self, inst, target):
attr = self.code_names[inst.arg]
sa = ir.DelAttr(target=self.get(target), attr=attr, loc=self.loc)
self.current_block.append(sa)
    def op_LOAD_ATTR(self, inst, item, res):
        """Emit ``item.<attr>`` into *res*."""
        item = self.get(item)
        if PYVERSION in ((3, 12), (3, 13), (3, 14)):
            # Since 3.12 the low oparg bit carries extra information (see
            # CPython dis docs); only the name index (oparg >> 1) matters here.
            attr = self.code_names[inst.arg >> 1]
        elif PYVERSION in ((3, 10), (3, 11)):
            attr = self.code_names[inst.arg]
        else:
            raise NotImplementedError(PYVERSION)
        getattr = ir.Expr.getattr(item, attr, loc=self.loc)
        self.store(getattr, res)
    def op_LOAD_CONST(self, inst, res):
        """Load ``co_consts[oparg]`` into *res*.

        Tuples, frozensets and slices are decomposed element-by-element so
        that nested slice objects become explicit ``slice(...)`` calls in IR.
        """
        # New in 3.14: slice is loaded via LOAD_CONST. The value is the
        # slice object itself, so we get the start, stop and step from it
        # directly and then proceed with the code in `BUILD_SLICE`. It may also
        # be a tuple containing a slice, so we need to account for that too.
        def process_slice(value):
            # Rebuild a constant slice as slice(start, stop[, step]) IR.
            start = self.store(ir.Const(value.start, loc=self.loc),
                               name=f'$const_{value.start}', redefine=True)
            stop = self.store(ir.Const(value.stop, loc=self.loc),
                              name=f'$const_{value.stop}', redefine=True)
            slicevar = self.store(value=ir.Global("slice", slice,
                                                  loc=self.loc),
                                  name='$const_slice', redefine=True)
            if value.step is None:
                params = (start, stop)
            else:
                step = self.store(ir.Const(value.step, loc=self.loc),
                                  name=f'$const_{value.step}', redefine=True)
                params = (start, stop, step)
            return ir.Expr.call(slicevar, params, (), loc=self.loc)
        def process_args(value):
            # Store each element as a const (or a rebuilt slice call).
            st = []
            for x in value:
                if isinstance(x, slice):
                    st.append(self.store(process_slice(x),
                                         name='$const_my_slice',
                                         redefine=True))
                else:
                    st.append(self.store(ir.Const(x, loc=self.loc),
                                         name=f'$const_{x}',
                                         redefine=True))
            return st
        value = self.code_consts[inst.arg]
        if isinstance(value, tuple):
            const = ir.Expr.build_tuple(process_args(value), loc=self.loc)
        elif isinstance(value, frozenset):
            const = ir.Expr.build_set(process_args(value), loc=self.loc)
        elif isinstance(value, slice):
            const = process_slice(value)
        else:
            const = ir.Const(value, loc=self.loc)
        self.store(const, res)
    if PYVERSION in ((3, 14), ):
        # New in 3.14
        def op_LOAD_SMALL_INT(self, inst, res):
            # The integer is encoded directly in the oparg.
            value = inst.arg
            const = ir.Const(value, loc=self.loc)
            self.store(const, res)
    elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
        def op_LOAD_GLOBAL(self, inst, idx, res):
            # 3.11+: the caller supplies the decoded name index *idx*.
            name = self.code_names[idx]
            value = self.get_global_value(name)
            gl = ir.Global(name, value, loc=self.loc)
            self.store(gl, res)
    elif PYVERSION in ((3, 10),):
        def op_LOAD_GLOBAL(self, inst, res):
            # 3.10: the oparg is the name index directly.
            name = self.code_names[inst.arg]
            value = self.get_global_value(name)
            gl = ir.Global(name, value, loc=self.loc)
            self.store(gl, res)
    else:
        raise NotImplementedError(PYVERSION)
    def op_COPY_FREE_VARS(self, inst):
        # Frame-setup bookkeeping opcode (3.11+); no IR is needed for it.
        pass
    if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
        def op_LOAD_DEREF(self, inst, res):
            # 3.11+: cell and free variables share one oparg namespace;
            # decode the name via the code object's helper.
            name = self.func_id.func.__code__._varname_from_oparg(inst.arg)
            if name in self.code_cellvars:
                try:
                    gl = self.get(name)
                except NotDefinedError:
                    msg = "Unsupported use of cell variable encountered"
                    raise NotImplementedError(msg)
            elif name in self.code_freevars:
                idx = self.code_freevars.index(name)
                value = self.get_closure_value(idx)
                gl = ir.FreeVar(idx, name, value, loc=self.loc)
            # NOTE(review): a name in neither list would leave `gl` unbound
            # (NameError below) — presumably impossible bytecode; confirm.
            self.store(gl, res)
    elif PYVERSION in ((3, 10),):
        def op_LOAD_DEREF(self, inst, res):
            # 3.10: opargs below len(cellvars) are cells, the rest freevars.
            n_cellvars = len(self.code_cellvars)
            if inst.arg < n_cellvars:
                name = self.code_cellvars[inst.arg]
                gl = self.get(name)
            else:
                idx = inst.arg - n_cellvars
                name = self.code_freevars[idx]
                value = self.get_closure_value(idx)
                gl = ir.FreeVar(idx, name, value, loc=self.loc)
            self.store(gl, res)
    else:
        raise NotImplementedError(PYVERSION)
    if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
        def op_MAKE_CELL(self, inst):
            pass # ignored bytecode: cell creation needs no IR here
    if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
        def op_STORE_DEREF(self, inst, value):
            # 3.11+: decode the cell/free variable name via the code object.
            name = self.func_id.func.__code__._varname_from_oparg(inst.arg)
            value = self.get(value)
            self.store(value=value, name=name)
    elif PYVERSION in ((3, 10),):
        def op_STORE_DEREF(self, inst, value):
            # 3.10: opargs below len(cellvars) are cells, the rest freevars.
            n_cellvars = len(self.code_cellvars)
            if inst.arg < n_cellvars:
                dstname = self.code_cellvars[inst.arg]
            else:
                dstname = self.code_freevars[inst.arg - n_cellvars]
            value = self.get(value)
            self.store(value=value, name=dstname)
    else:
        raise NotImplementedError(PYVERSION)
def op_SETUP_LOOP(self, inst):
assert self.blocks[inst.offset] is self.current_block
loop = ir.Loop(inst.offset, exit=(inst.next + inst.arg))
self.syntax_blocks.append(loop)
    def op_SETUP_WITH(self, inst, contextmanager, exitfn=None):
        """Open a ``with`` block: record its extent and emit EnterWith."""
        assert self.blocks[inst.offset] is self.current_block
        # Handle with
        exitpt = inst.next + inst.arg
        wth = ir.With(inst.offset, exit=exitpt)
        self.syntax_blocks.append(wth)
        ctxmgr = self.get(contextmanager)
        self.current_block.append(ir.EnterWith(contextmanager=ctxmgr,
                                               begin=inst.offset,
                                               end=exitpt, loc=self.loc,))
        # Store exit fn — a placeholder None constant stands in for it.
        exit_fn_obj = ir.Const(None, loc=self.loc)
        self.store(value=exit_fn_obj, name=exitfn)
    if PYVERSION in ((3, 14), ):
        # Replaced by LOAD_SPECIAL in 3.14.
        pass
    elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
        def op_BEFORE_WITH(self, inst, contextmanager, exitfn, end):
            """Open a ``with`` block (3.10-3.13 BEFORE_WITH opcode)."""
            assert self.blocks[inst.offset] is self.current_block
            if PYVERSION in ((3, 12), (3, 13)):
                # Python 3.12 hack for handling nested with blocks
                if end > self.last_active_offset:
                    # Use exception entries to figure out end of syntax block
                    end = max([ex.end for ex in self.active_exception_entries
                               if ex.target == end])
            elif PYVERSION in ((3, 10), (3, 11)):
                pass
            else:
                raise NotImplementedError(PYVERSION)
            # Handle with
            wth = ir.With(inst.offset, exit=end)
            self.syntax_blocks.append(wth)
            ctxmgr = self.get(contextmanager)
            self.current_block.append(ir.EnterWith(contextmanager=ctxmgr,
                                                   begin=inst.offset,
                                                   end=end, loc=self.loc,))
            # Store exit function — placeholder None constant.
            exit_fn_obj = ir.Const(None, loc=self.loc)
            self.store(value=exit_fn_obj, name=exitfn)
    else:
        raise NotImplementedError(PYVERSION)
    def op_SETUP_FINALLY(self, inst):
        # Removed since python3.11; marks the start of a try region.
        self._insert_try_block_begin()
    # Stack-bookkeeping opcodes with no IR counterpart: deliberate no-ops.
    def op_WITH_CLEANUP(self, inst):
        "no-op"
    def op_WITH_CLEANUP_START(self, inst):
        "no-op"
    def op_WITH_CLEANUP_FINISH(self, inst):
        "no-op"
    def op_END_FINALLY(self, inst):
        "no-op"
def op_BEGIN_FINALLY(self, inst, temps):
# The *temps* are the exception variables
const_none = ir.Const(None, loc=self.loc)
for tmp in temps:
# Set to None for now
self.store(const_none, name=tmp)
self._exception_vars.add(tmp)
    def op_CALL(self, inst, func, args, kw_names, res):
        """Emit a call expression (3.11+ CALL opcode)."""
        func = self.get(func)
        args = [self.get(x) for x in args]
        if kw_names is not None:
            # Pre-3.13: keyword names come from a const tuple; the trailing
            # positional args pair up with those names.
            assert PYVERSION < (3, 13)
            names = self.code_consts[kw_names]
            kwargs = list(zip(names, args[-len(names):]))
            args = args[:-len(names)]
        else:
            kwargs = ()
        expr = ir.Expr.call(func, args, kwargs, loc=self.loc)
        self.store(expr, res)
    if PYVERSION in ((3, 13), (3, 14)):
        def op_CALL_KW(self, inst, func, args, kw_names, res):
            """Emit a keyword call (3.13+ CALL_KW opcode)."""
            func = self.get(func)
            args = [self.get(x) for x in args]
            # The const index is encoded as the trailing '.'-separated
            # component of the *kw_names* temporary's name.
            consti = int(kw_names.rsplit('.', 2)[-1])
            names = self.code_consts[consti]
            kwargs = list(zip(names, args[-len(names):]))
            args = args[:-len(names)]
            expr = ir.Expr.call(func, args, kwargs, loc=self.loc)
            self.store(expr, res)
    else:
        assert PYVERSION < (3, 13)
def op_CALL_FUNCTION(self, inst, func, args, res):
func = self.get(func)
args = [self.get(x) for x in args]
expr = ir.Expr.call(func, args, (), loc=self.loc)
self.store(expr, res)
    def op_CALL_FUNCTION_KW(self, inst, func, args, names, res):
        """Emit a keyword call (pre-3.11 CALL_FUNCTION_KW).

        The keyword names were pushed as a const tuple built earlier in this
        block; that build is located and removed, and its strings recovered.
        """
        func = self.get(func)
        args = [self.get(x) for x in args]
        # Find names const
        # NOTE(review): the loop variable shadows the *inst* parameter, and
        # `keys` is only bound when the tuple build is found — presumably
        # guaranteed by the bytecode shape; confirm.
        names = self.get(names)
        for inst in self.current_block.body:
            if isinstance(inst, ir.Assign) and inst.target is names:
                self.current_block.remove(inst)
                # scan up the block looking for the values, remove them
                # and find their name strings
                named_items = []
                for x in inst.value.items:
                    for y in self.current_block.body[::-1]:
                        if x == y.target:
                            self.current_block.remove(y)
                            named_items.append(y.value.value)
                            break
                keys = named_items
                break
        nkeys = len(keys)
        # Trailing positional args pair with the recovered keyword names.
        posvals = args[:-nkeys]
        kwvals = args[-nkeys:]
        keyvalues = list(zip(keys, kwvals))
        expr = ir.Expr.call(func, posvals, keyvalues, loc=self.loc)
        self.store(expr, res)
def op_CALL_FUNCTION_EX(self, inst, func, vararg, varkwarg, res):
func = self.get(func)
vararg = self.get(vararg)
if varkwarg is not None:
varkwarg = self.get(varkwarg)
expr = ir.Expr.call(
func, [], [], loc=self.loc, vararg=vararg, varkwarg=varkwarg
)
self.store(expr, res)
    def _build_tuple_unpack(self, inst, tuples, temps, is_assign):
        """Concatenate the operand tuples into a single tuple.

        For the assign-like form ``y = (*x,)`` an intrinsic call is emitted;
        otherwise each extra operand is folded in as ``first + tuple(other)``.
        """
        first = self.get(tuples[0])
        if is_assign:
            # it's assign-like, defer handling to an intrinsic that will have
            # type information.
            # Can deal with tuples only, i.e. y = (*x,). where x = <tuple>
            gv_name = "unpack_single_tuple"
            gv_fn = ir.Global(gv_name, unpack_single_tuple, loc=self.loc,)
            self.store(value=gv_fn, name=gv_name, redefine=True)
            exc = ir.Expr.call(self.get(gv_name), args=(first,), kws=(),
                               loc=self.loc,)
            self.store(exc, temps[0])
        else:
            loc = self.loc
            for other, tmp in zip(map(self.get, tuples[1:]), temps):
                # Emit as `first + tuple(other)`
                gv_tuple = ir.Global(
                    name="tuple", value=tuple,
                    loc=loc,
                )
                tuple_var = self.store(
                    gv_tuple, "$_list_extend_gv_tuple", redefine=True,
                )
                tuplify_val = ir.Expr.call(
                    tuple_var, (other,), (),
                    loc=loc,
                )
                tuplify_var = self.store(tuplify_val, "$_tuplify",
                                         redefine=True)
                out = ir.Expr.binop(
                    fn=operator.add, lhs=first, rhs=self.get(tuplify_var.name),
                    loc=self.loc,
                )
                self.store(out, tmp)
                # Accumulate: the running result becomes the next lhs.
                first = self.get(tmp)
    def op_BUILD_TUPLE_UNPACK_WITH_CALL(self, inst, tuples, temps, is_assign):
        # just unpack the input tuple, call inst will be handled afterwards
        self._build_tuple_unpack(inst, tuples, temps, is_assign)
    def op_BUILD_TUPLE_UNPACK(self, inst, tuples, temps, is_assign):
        # Delegate to the shared tuple-concatenation helper.
        self._build_tuple_unpack(inst, tuples, temps, is_assign)
def op_LIST_TO_TUPLE(self, inst, const_list, res):
expr = ir.Expr.dummy('list_to_tuple', (const_list,), loc=self.loc)
self.store(expr, res)
    def op_BUILD_CONST_KEY_MAP(self, inst, keys, keytmps, values, res):
        # Unpack the constant key-tuple and reused build_map which takes
        # a sequence of (key, value) pair.
        keyvar = self.get(keys)
        # TODO: refactor this pattern. occurred several times.
        # NOTE(review): the loop variable shadows the *inst* parameter, and
        # `keytup` is only bound when the tuple build is found — presumably
        # guaranteed by the bytecode shape; confirm.
        for inst in self.current_block.body:
            if isinstance(inst, ir.Assign) and inst.target is keyvar:
                self.current_block.remove(inst)
                # scan up the block looking for the values, remove them
                # and find their name strings
                named_items = []
                for x in inst.value.items:
                    for y in self.current_block.body[::-1]:
                        if x == y.target:
                            self.current_block.remove(y)
                            named_items.append(y.value.value)
                            break
                keytup = named_items
                break
        assert len(keytup) == len(values)
        keyconsts = [ir.Const(value=x, loc=self.loc) for x in keytup]
        for kval, tmp in zip(keyconsts, keytmps):
            self.store(kval, tmp)
        items = list(zip(map(self.get, keytmps), map(self.get, values)))
        # sort out literal values
        literal_items = []
        for v in values:
            defns = self.definitions[v]
            if len(defns) != 1:
                break
            defn = defns[0]
            if not isinstance(defn, ir.Const):
                break
            literal_items.append(defn.value)
        def resolve_const(v):
            # Fold a value to its constant, or an _UNKNOWN_VALUE marker.
            defns = self.definitions[v]
            if len(defns) != 1:
                return _UNKNOWN_VALUE(self.get(v).name)
            defn = defns[0]
            if not isinstance(defn, ir.Const):
                return _UNKNOWN_VALUE(self.get(v).name)
            return defn.value
        if len(literal_items) != len(values):
            literal_dict = {x: resolve_const(y) for x, y in
                            zip(keytup, values)}
        else:
            literal_dict = {x:y for x, y in zip(keytup, literal_items)}
        # to deal with things like {'a': 1, 'a': 'cat', 'b': 2, 'a': 2j}
        # store the index of the actual used value for a given key, this is
        # used when lowering to pull the right value out into the tuple repr
        # of a mixed value type dictionary.
        value_indexes = {}
        for i, k in enumerate(keytup):
            value_indexes[k] = i
        # NOTE(review): size=2 is hard-coded (op_BUILD_MAP passes the real
        # size) — looks suspicious but matches the surrounding style;
        # presumably unused downstream, verify before changing.
        expr = ir.Expr.build_map(items=items,
                                 size=2,
                                 literal_value=literal_dict,
                                 value_indexes=value_indexes,
                                 loc=self.loc)
        self.store(expr, res)
def op_GET_ITER(self, inst, value, res):
expr = ir.Expr.getiter(value=self.get(value), loc=self.loc)
self.store(expr, res)
    def op_FOR_ITER(self, inst, iterator, pair, indval, pred):
        """
        Assign a new block to this instruction: emit ``iternext`` on the
        iterator, unpack the (value, is_valid) pair, and branch on validity.
        """
        assert inst.offset in self.blocks, "FOR_ITER must be block head"
        # Emit code
        val = self.get(iterator)
        pairval = ir.Expr.iternext(value=val, loc=self.loc)
        self.store(pairval, pair)
        iternext = ir.Expr.pair_first(value=self.get(pair), loc=self.loc)
        self.store(iternext, indval)
        isvalid = ir.Expr.pair_second(value=self.get(pair), loc=self.loc)
        self.store(isvalid, pred)
        # Conditional jump: continue the loop body, or exit to the jump target.
        br = ir.Branch(cond=self.get(pred), truebr=inst.next,
                       falsebr=inst.get_jump_target(),
                       loc=self.loc)
        self.current_block.append(br)
    if PYVERSION in ((3, 14),):
        # Removed in 3.14 -- replaced with BINARY_OP and []
        pass
    elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
        def op_BINARY_SUBSCR(self, inst, target, index, res):
            # target[index] → res
            index = self.get(index)
            target = self.get(target)
            expr = ir.Expr.getitem(target, index=index, loc=self.loc)
            self.store(expr, res)
    else:
        raise NotImplementedError(PYVERSION)
def op_STORE_SUBSCR(self, inst, target, index, value):
index = self.get(index)
target = self.get(target)
value = self.get(value)
stmt = ir.SetItem(target=target, index=index, value=value,
loc=self.loc)
self.current_block.append(stmt)
def op_DELETE_SUBSCR(self, inst, target, index):
index = self.get(index)
target = self.get(target)
stmt = ir.DelItem(target=target, index=index, loc=self.loc)
self.current_block.append(stmt)
def op_BUILD_TUPLE(self, inst, items, res):
expr = ir.Expr.build_tuple(items=[self.get(x) for x in items],
loc=self.loc)
self.store(expr, res)
def op_BUILD_LIST(self, inst, items, res):
expr = ir.Expr.build_list(items=[self.get(x) for x in items],
loc=self.loc)
self.store(expr, res)
def op_BUILD_SET(self, inst, items, res):
expr = ir.Expr.build_set(items=[self.get(x) for x in items],
loc=self.loc)
self.store(expr, res)
def op_SET_ADD(self, inst, value, target, addvar, res):
value = self.get(value)
target = self.get(target)
addattr = ir.Expr.getattr(target, 'add', loc=self.loc)
self.store(value=addattr, name=addvar)
addinst = ir.Expr.call(self.get(addvar), (value, ), (), loc=self.loc)
self.store(value=addinst, name=res)
def op_SET_UPDATE(self, inst, target, value, updatevar, res):
target = self.get(target)
value = self.get(value)
updateattr = ir.Expr.getattr(target, 'update', loc=self.loc)
self.store(value=updateattr, name=updatevar)
updateinst = ir.Expr.call(self.get(updatevar), (value,), (),
loc=self.loc)
self.store(value=updateinst, name=res)
    def op_DICT_UPDATE(self, inst, target, value, updatevar, res):
        """Rewrite DICT_UPDATE as ``target._update_from_bytecode(value)``."""
        target = self.get(target)
        value = self.get(value)
        # We generate _update_from_bytecode instead of update so we can
        # differentiate between user .update() calls and those from the
        # bytecode. This is then used to recombine dictionaries in peephole
        # optimizations. See the discussion in this PR about why:
        # https://github.com/numba/numba/pull/7964/files#r868229306
        updateattr = ir.Expr.getattr(
            target, '_update_from_bytecode', loc=self.loc
        )
        self.store(value=updateattr, name=updatevar)
        updateinst = ir.Expr.call(self.get(updatevar), (value,), (),
                                  loc=self.loc)
        self.store(value=updateinst, name=res)
    def op_BUILD_MAP(self, inst, items, size, res):
        """Emit a build_map expr, recording literal keys/values when known."""
        got_items = [(self.get(k), self.get(v)) for k, v in items]
        # sort out literal values, this is a bit contrived but is to handle
        # situations like `{1: 10, 1: 10}` where the size of the literal dict
        # is smaller than the definition
        def get_literals(target):
            # Collect const values; stop at the first non-constant.
            literal_items = []
            values = [self.get(v.name) for v in target]
            for v in values:
                defns = self.definitions[v.name]
                if len(defns) != 1:
                    break
                defn = defns[0]
                if not isinstance(defn, ir.Const):
                    break
                literal_items.append(defn.value)
            return literal_items
        literal_keys = get_literals(x[0] for x in got_items)
        literal_values = get_literals(x[1] for x in got_items)
        has_literal_keys = len(literal_keys) == len(got_items)
        has_literal_values = len(literal_values) == len(got_items)
        value_indexes = {}
        if not has_literal_keys and not has_literal_values:
            literal_dict = None
        elif has_literal_keys and not has_literal_values:
            # Known keys, unknown values: mark values as unknown.
            literal_dict = {x: _UNKNOWN_VALUE(y[1]) for x, y in
                            zip(literal_keys, got_items)}
            for i, k in enumerate(literal_keys):
                value_indexes[k] = i
        else:
            literal_dict = {x: y for x, y in zip(literal_keys, literal_values)}
            for i, k in enumerate(literal_keys):
                value_indexes[k] = i
        expr = ir.Expr.build_map(items=got_items, size=size,
                                 literal_value=literal_dict,
                                 value_indexes=value_indexes,
                                 loc=self.loc)
        self.store(expr, res)
def op_STORE_MAP(self, inst, dct, key, value):
stmt = ir.StoreMap(dct=self.get(dct), key=self.get(key),
value=self.get(value), loc=self.loc)
self.current_block.append(stmt)
def op_UNARY_NEGATIVE(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('-', value=value, loc=self.loc)
return self.store(expr, res)
def op_UNARY_POSITIVE(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('+', value=value, loc=self.loc)
return self.store(expr, res)
def op_UNARY_INVERT(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('~', value=value, loc=self.loc)
return self.store(expr, res)
def op_UNARY_NOT(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('not', value=value, loc=self.loc)
return self.store(expr, res)
def _binop(self, op, lhs, rhs, res):
op = BINOPS_TO_OPERATORS[op]
lhs = self.get(lhs)
rhs = self.get(rhs)
expr = ir.Expr.binop(op, lhs=lhs, rhs=rhs, loc=self.loc)
self.store(expr, res)
def _inplace_binop(self, op, lhs, rhs, res):
immuop = BINOPS_TO_OPERATORS[op]
op = INPLACE_BINOPS_TO_OPERATORS[op + '=']
lhs = self.get(lhs)
rhs = self.get(rhs)
expr = ir.Expr.inplace_binop(op, immuop, lhs=lhs, rhs=rhs,
loc=self.loc)
self.store(expr, res)
    def op_BINARY_OP(self, inst, op, lhs, rhs, res):
        """Dispatch the 3.11+ unified BINARY_OP on its operator string."""
        if op == "[]":
            # Special case 3.14 -- body of BINARY_SUBSCR now here
            lhs = self.get(lhs)
            rhs = self.get(rhs)
            expr = ir.Expr.getitem(lhs, index=rhs, loc=self.loc)
            self.store(expr, res)
        elif "=" in op:
            # Trailing '=' marks the in-place form, e.g. '+='.
            self._inplace_binop(op[:-1], lhs, rhs, res)
        else:
            self._binop(op, lhs, rhs, res)
    # Legacy fixed binary opcodes (pre-3.11); each delegates to _binop with
    # its operator symbol.
    def op_BINARY_ADD(self, inst, lhs, rhs, res):
        self._binop('+', lhs, rhs, res)
    def op_BINARY_SUBTRACT(self, inst, lhs, rhs, res):
        self._binop('-', lhs, rhs, res)
    def op_BINARY_MULTIPLY(self, inst, lhs, rhs, res):
        self._binop('*', lhs, rhs, res)
    def op_BINARY_DIVIDE(self, inst, lhs, rhs, res):
        # '/?' is the classic (Python 2) division marker.
        self._binop('/?', lhs, rhs, res)
    def op_BINARY_TRUE_DIVIDE(self, inst, lhs, rhs, res):
        self._binop('/', lhs, rhs, res)
    def op_BINARY_FLOOR_DIVIDE(self, inst, lhs, rhs, res):
        self._binop('//', lhs, rhs, res)
    def op_BINARY_MODULO(self, inst, lhs, rhs, res):
        self._binop('%', lhs, rhs, res)
    def op_BINARY_POWER(self, inst, lhs, rhs, res):
        self._binop('**', lhs, rhs, res)
    def op_BINARY_MATRIX_MULTIPLY(self, inst, lhs, rhs, res):
        self._binop('@', lhs, rhs, res)
    def op_BINARY_LSHIFT(self, inst, lhs, rhs, res):
        self._binop('<<', lhs, rhs, res)
    def op_BINARY_RSHIFT(self, inst, lhs, rhs, res):
        self._binop('>>', lhs, rhs, res)
    def op_BINARY_AND(self, inst, lhs, rhs, res):
        self._binop('&', lhs, rhs, res)
    def op_BINARY_OR(self, inst, lhs, rhs, res):
        self._binop('|', lhs, rhs, res)
    def op_BINARY_XOR(self, inst, lhs, rhs, res):
        self._binop('^', lhs, rhs, res)
    # Legacy fixed in-place opcodes (pre-3.11); each delegates to
    # _inplace_binop with its operator symbol.
    def op_INPLACE_ADD(self, inst, lhs, rhs, res):
        self._inplace_binop('+', lhs, rhs, res)
    def op_INPLACE_SUBTRACT(self, inst, lhs, rhs, res):
        self._inplace_binop('-', lhs, rhs, res)
    def op_INPLACE_MULTIPLY(self, inst, lhs, rhs, res):
        self._inplace_binop('*', lhs, rhs, res)
    def op_INPLACE_DIVIDE(self, inst, lhs, rhs, res):
        # '/?' is the classic (Python 2) division marker.
        self._inplace_binop('/?', lhs, rhs, res)
    def op_INPLACE_TRUE_DIVIDE(self, inst, lhs, rhs, res):
        self._inplace_binop('/', lhs, rhs, res)
    def op_INPLACE_FLOOR_DIVIDE(self, inst, lhs, rhs, res):
        self._inplace_binop('//', lhs, rhs, res)
    def op_INPLACE_MODULO(self, inst, lhs, rhs, res):
        self._inplace_binop('%', lhs, rhs, res)
    def op_INPLACE_POWER(self, inst, lhs, rhs, res):
        self._inplace_binop('**', lhs, rhs, res)
    def op_INPLACE_MATRIX_MULTIPLY(self, inst, lhs, rhs, res):
        self._inplace_binop('@', lhs, rhs, res)
    def op_INPLACE_LSHIFT(self, inst, lhs, rhs, res):
        self._inplace_binop('<<', lhs, rhs, res)
    def op_INPLACE_RSHIFT(self, inst, lhs, rhs, res):
        self._inplace_binop('>>', lhs, rhs, res)
    def op_INPLACE_AND(self, inst, lhs, rhs, res):
        self._inplace_binop('&', lhs, rhs, res)
    def op_INPLACE_OR(self, inst, lhs, rhs, res):
        self._inplace_binop('|', lhs, rhs, res)
    def op_INPLACE_XOR(self, inst, lhs, rhs, res):
        self._inplace_binop('^', lhs, rhs, res)
    # Unconditional jumps: all emit a plain ir.Jump to the decoded target.
    def op_JUMP_ABSOLUTE(self, inst):
        jmp = ir.Jump(inst.get_jump_target(), loc=self.loc)
        self.current_block.append(jmp)
    def op_JUMP_FORWARD(self, inst):
        jmp = ir.Jump(inst.get_jump_target(), loc=self.loc)
        self.current_block.append(jmp)
    def op_JUMP_BACKWARD(self, inst):
        jmp = ir.Jump(inst.get_jump_target(), loc=self.loc)
        self.current_block.append(jmp)
    op_JUMP_BACKWARD_NO_INTERRUPT = op_JUMP_BACKWARD
def op_POP_BLOCK(self, inst, kind=None):
if kind is None:
self.syntax_blocks.pop()
elif kind == 'with':
d = ir.PopBlock(loc=self.loc)
self.current_block.append(d)
elif kind == 'try':
self._insert_try_block_end()
    def op_RETURN_VALUE(self, inst, retval, castval):
        # Cast the value (per the function signature) then emit ir.Return.
        self.store(ir.Expr.cast(self.get(retval), loc=self.loc), castval)
        ret = ir.Return(self.get(castval), loc=self.loc)
        self.current_block.append(ret)
    if PYVERSION in ((3, 12), (3, 13), (3, 14)):
        def op_RETURN_CONST(self, inst, retval, castval):
            # 3.12+ fused "return <const>": load the const, then behave
            # like RETURN_VALUE (cast + ir.Return).
            value = self.code_consts[inst.arg]
            const = ir.Const(value, loc=self.loc)
            self.store(const, retval)
            self.store(ir.Expr.cast(self.get(retval), loc=self.loc), castval)
            ret = ir.Return(self.get(castval), loc=self.loc)
            self.current_block.append(ret)
    elif PYVERSION in ((3, 10), (3, 11)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    if PYVERSION in ((3, 13), (3, 14)):
        def op_TO_BOOL(self, inst, val, res):
            self.store(self.get(val), res)  # TODO: just a lazy hack
    elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    def op_COMPARE_OP(self, inst, lhs, rhs, res):
        """Emit a comparison; decode the operator per bytecode version."""
        if PYVERSION in ((3, 13), (3, 14)):
            op = dis.cmp_op[inst.arg >> 5]
            # TODO: fifth lowest bit now indicates a forced version to bool.
        elif PYVERSION in ((3, 12),):
            op = dis.cmp_op[inst.arg >> 4]
        elif PYVERSION in ((3, 10), (3, 11)):
            op = dis.cmp_op[inst.arg]
        else:
            raise NotImplementedError(PYVERSION)
        if op == 'in' or op == 'not in':
            # Membership tests put the container on the other side.
            lhs, rhs = rhs, lhs
        if op == 'not in':
            # Emit 'in' then negate the result.
            self._binop('in', lhs, rhs, res)
            tmp = self.get(res)
            out = ir.Expr.unary('not', value=tmp, loc=self.loc)
            self.store(out, res)
        elif op == 'exception match':
            # Lower to a call of the exception_match helper.
            gv_fn = ir.Global(
                "exception_match", eh.exception_match, loc=self.loc,
            )
            exc_match_name = '$exc_match'
            self.store(value=gv_fn, name=exc_match_name, redefine=True)
            lhs = self.get(lhs)
            rhs = self.get(rhs)
            exc = ir.Expr.call(
                self.get(exc_match_name), args=(lhs, rhs), kws=(), loc=self.loc,
            )
            self.store(exc, res)
        else:
            self._binop(op, lhs, rhs, res)
def op_IS_OP(self, inst, lhs, rhs, res):
# invert if op case is 1
op = 'is not' if inst.arg == 1 else 'is'
self._binop(op, lhs, rhs, res)
def op_CONTAINS_OP(self, inst, lhs, rhs, res):
lhs, rhs = rhs, lhs
self._binop('in', lhs, rhs, res)
# invert if op case is 1
if inst.arg == 1:
tmp = self.get(res)
out = ir.Expr.unary('not', value=tmp, loc=self.loc)
self.store(out, res)
def op_BREAK_LOOP(self, inst, end=None):
if end is None:
loop = self.syntax_blocks[-1]
assert isinstance(loop, ir.Loop)
end = loop.exit
jmp = ir.Jump(target=end, loc=self.loc)
self.current_block.append(jmp)
    def _op_JUMP_IF(self, inst, pred, iftrue):
        """Emit ``branch bool(pred)``; *iftrue* selects which edge jumps."""
        brs = {
            True: inst.get_jump_target(),
            False: inst.next,
        }
        truebr = brs[iftrue]
        falsebr = brs[not iftrue]
        # Wrap the predicate in an explicit bool() call before branching.
        name = "$bool%s" % (inst.offset)
        gv_fn = ir.Global("bool", bool, loc=self.loc)
        self.store(value=gv_fn, name=name)
        callres = ir.Expr.call(self.get(name), (self.get(pred),), (),
                               loc=self.loc)
        pname = "$%spred" % (inst.offset)
        predicate = self.store(value=callres, name=pname)
        bra = ir.Branch(cond=predicate, truebr=truebr, falsebr=falsebr,
                        loc=self.loc)
        self.current_block.append(bra)
    # Thin wrappers over _op_JUMP_IF selecting the jump polarity.
    def op_JUMP_IF_FALSE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=False)
    def op_JUMP_IF_TRUE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=True)
    def _jump_if_none(self, inst, pred, iftrue):
        """Emit a branch on ``pred is None`` / ``pred is not None``."""
        # branch pruning assumes true falls through and false is jump
        truebr = inst.next
        falsebr = inst.get_jump_target()
        # this seems strange: the operator is inverted because the jump
        # target is placed on the false edge (see note above).
        if not iftrue:
            op = BINOPS_TO_OPERATORS["is"]
        else:
            op = BINOPS_TO_OPERATORS["is not"]
        rhs = self.store(value=ir.Const(None, loc=self.loc),
                         name=f"$constNone{inst.offset}")
        lhs = self.get(pred)
        isnone = ir.Expr.binop(op, lhs=lhs, rhs=rhs, loc=self.loc)
        maybeNone = f"$maybeNone{inst.offset}"
        self.store(value=isnone, name=maybeNone)
        # Wrap in bool() before branching, like _op_JUMP_IF does.
        name = f"$bool{inst.offset}"
        gv_fn = ir.Global("bool", bool, loc=self.loc)
        self.store(value=gv_fn, name=name)
        callres = ir.Expr.call(self.get(name), (self.get(maybeNone),), (),
                               loc=self.loc)
        pname = f"$pred{inst.offset}"
        predicate = self.store(value=callres, name=pname)
        branch = ir.Branch(cond=predicate,
                           truebr=truebr,
                           falsebr=falsebr,
                           loc=self.loc)
        self.current_block.append(branch)
    # None-test jumps: thin wrappers over _jump_if_none.
    def op_POP_JUMP_FORWARD_IF_NONE(self, inst, pred):
        self._jump_if_none(inst, pred, True)
    def op_POP_JUMP_FORWARD_IF_NOT_NONE(self, inst, pred):
        self._jump_if_none(inst, pred, False)
    if PYVERSION in ((3, 12), (3, 13), (3, 14)):
        def op_POP_JUMP_IF_NONE(self, inst, pred):
            self._jump_if_none(inst, pred, True)
        def op_POP_JUMP_IF_NOT_NONE(self, inst, pred):
            self._jump_if_none(inst, pred, False)
    elif PYVERSION in ((3, 10), (3, 11)):
        pass
    else:
        raise NotImplementedError(PYVERSION)
    def op_POP_JUMP_BACKWARD_IF_NONE(self, inst, pred):
        self._jump_if_none(inst, pred, True)
    def op_POP_JUMP_BACKWARD_IF_NOT_NONE(self, inst, pred):
        self._jump_if_none(inst, pred, False)
    # Truth-test jumps: thin wrappers over _op_JUMP_IF.
    def op_POP_JUMP_FORWARD_IF_FALSE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=False)
    def op_POP_JUMP_FORWARD_IF_TRUE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=True)
    def op_POP_JUMP_BACKWARD_IF_FALSE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=False)
    def op_POP_JUMP_BACKWARD_IF_TRUE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=True)
    def op_POP_JUMP_IF_FALSE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=False)
    def op_POP_JUMP_IF_TRUE(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=True)
    def op_JUMP_IF_FALSE_OR_POP(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=False)
    def op_JUMP_IF_TRUE_OR_POP(self, inst, pred):
        self._op_JUMP_IF(inst, pred=pred, iftrue=True)
def op_CHECK_EXC_MATCH(self, inst, pred, tos, tos1):
gv_fn = ir.Global(
"exception_match", eh.exception_match, loc=self.loc,
)
exc_match_name = '$exc_match'
self.store(value=gv_fn, name=exc_match_name, redefine=True)
lhs = self.get(tos1)
rhs = self.get(tos)
exc = ir.Expr.call(
self.get(exc_match_name), args=(lhs, rhs), kws=(), loc=self.loc,
)
self.store(exc, pred)
def op_JUMP_IF_NOT_EXC_MATCH(self, inst, pred, tos, tos1):
truebr = inst.next
falsebr = inst.get_jump_target()
gv_fn = ir.Global(
"exception_match", eh.exception_match, loc=self.loc,
)
exc_match_name = '$exc_match'
self.store(value=gv_fn, name=exc_match_name, redefine=True)
lhs = self.get(tos1)
rhs = self.get(tos)
exc = ir.Expr.call(
self.get(exc_match_name), args=(lhs, rhs), kws=(), loc=self.loc,
)
predicate = self.store(exc, pred)
bra = ir.Branch(cond=predicate, truebr=truebr, falsebr=falsebr,
loc=self.loc)
self.current_block.append(bra)
def op_RERAISE(self, inst, exc):
tryblk = self.dfainfo.active_try_block
if tryblk is not None:
stmt = ir.TryRaise(exception=None, loc=self.loc)
self.current_block.append(stmt)
self._insert_try_block_end()
self.current_block.append(ir.Jump(tryblk['end'], loc=self.loc))
else:
# Numba can't handle this case and it's caught else where, this is a
# runtime guard in case this is reached by unknown means.
msg = (f"Unreachable condition reached (op code RERAISE executed)"
f"{error_extras['reportable']}")
stmt = ir.StaticRaise(AssertionError, (msg,), self.loc)
self.current_block.append(stmt)
def op_RAISE_VARARGS(self, inst, exc):
if exc is not None:
exc = self.get(exc)
tryblk = self.dfainfo.active_try_block
if tryblk is not None:
# In a try block
stmt = ir.TryRaise(exception=exc, loc=self.loc)
self.current_block.append(stmt)
self._insert_try_block_end()
self.current_block.append(ir.Jump(tryblk['end'], loc=self.loc))
else:
# Not in a try block
stmt = ir.Raise(exception=exc, loc=self.loc)
self.current_block.append(stmt)
def op_YIELD_VALUE(self, inst, value, res):
# initialize index to None. it's being set later in post-processing
index = None
inst = ir.Yield(value=self.get(value), index=index, loc=self.loc)
return self.store(inst, res)
def op_MAKE_FUNCTION(self, inst, name, code, closure, annotations,
annotate, kwdefaults, defaults, res):
# annotations are ignored by numba but useful for static analysis
# re. https://github.com/numba/numba/issues/7269
# annotate is ignored too
# re. https://github.com/numba/numba/pull/10321
if kwdefaults is not None:
msg = "op_MAKE_FUNCTION with kwdefaults is not implemented"
raise NotImplementedError(msg)
if defaults:
if isinstance(defaults, tuple):
defaults = tuple([self.get(name) for name in defaults])
else:
defaults = self.get(defaults)
assume_code_const = self.definitions[code][0]
if not isinstance(assume_code_const, ir.Const):
msg = (
"Unsupported use of closure. "
"Probably caused by complex control-flow constructs; "
"e.g. try-except"
)
raise errors.UnsupportedBytecodeError(msg, loc=self.loc)
fcode = assume_code_const.value
if name:
name = self.get(name)
if closure:
closure = self.get(closure)
expr = ir.Expr.make_function(name, fcode, closure, defaults, self.loc)
self.store(expr, res)
def op_MAKE_CLOSURE(self, inst, name, code, closure, annotations,
kwdefaults, defaults, res):
self.op_MAKE_FUNCTION(inst, name, code, closure, annotations,
kwdefaults, defaults, res)
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
def op_LOAD_CLOSURE(self, inst, res):
name = self.func_id.func.__code__._varname_from_oparg(inst.arg)
if name in self.code_cellvars:
try:
gl = self.get(name)
except NotDefinedError:
msg = "Unsupported use of cell variable encountered"
raise NotImplementedError(msg)
elif name in self.code_freevars:
idx = self.code_freevars.index(name)
value = self.get_closure_value(idx)
gl = ir.FreeVar(idx, name, value, loc=self.loc)
else:
assert 0, "unreachable"
self.store(gl, res)
elif PYVERSION in ((3, 10),):
def op_LOAD_CLOSURE(self, inst, res):
n_cellvars = len(self.code_cellvars)
if inst.arg < n_cellvars:
name = self.code_cellvars[inst.arg]
try:
gl = self.get(name)
except NotDefinedError:
msg = "Unsupported use of cell variable encountered"
raise NotImplementedError(msg)
else:
idx = inst.arg - n_cellvars
name = self.code_freevars[idx]
value = self.get_closure_value(idx)
gl = ir.FreeVar(idx, name, value, loc=self.loc)
self.store(gl, res)
else:
raise NotImplementedError(PYVERSION)
def op_LIST_APPEND(self, inst, target, value, appendvar, res):
target = self.get(target)
value = self.get(value)
appendattr = ir.Expr.getattr(target, 'append', loc=self.loc)
self.store(value=appendattr, name=appendvar)
appendinst = ir.Expr.call(self.get(appendvar), (value,), (),
loc=self.loc)
self.store(value=appendinst, name=res)
def op_LIST_EXTEND(self, inst, target, value, extendvar, res):
target = self.get(target)
value = self.get(value)
# If the statements between the current instruction and the target
# are N * consts followed by build_tuple AND the target has no items,
# it's a situation where a list is being statically initialised, rewrite
# the build_tuple as a build_list, drop the extend, and wire up the
# target as the result from the build_tuple that's been rewritten.
# See if this is the first statement in a block, if so its probably from
# control flow in a tuple unpack like:
# `(*(1, (2,) if predicate else (3,)))`
# this cannot be handled as present so raise
msg = ("An unsupported bytecode sequence has been encountered: "
"op_LIST_EXTEND at the start of a block.\n\nThis could be "
"due to the use of a branch in a tuple unpacking statement.")
if not self.current_block.body:
raise errors.UnsupportedBytecodeError(msg)
# is last emitted statement a build_tuple?
stmt = self.current_block.body[-1]
ok = isinstance(stmt.value, ir.Expr) and stmt.value.op == "build_tuple"
# check statements from self.current_block.body[-1] through to target,
# make sure they are consts
build_empty_list = None
if ok:
for stmt in reversed(self.current_block.body[:-1]):
if not isinstance(stmt, ir.Assign):
ok = False
break
# if its not a const, it needs to be the `build_list` for the
# target, else it's something else we don't know about so just
# bail
if isinstance(stmt.value, ir.Const):
continue
# it's not a const, check for target
elif isinstance(stmt.value, ir.Expr) and stmt.target == target:
build_empty_list = stmt
# it's only ok to do this if the target has no initializer
# already
ok = not stmt.value.items
break
else:
ok = False
break
if ok and build_empty_list is None:
raise errors.UnsupportedBytecodeError(msg)
if ok:
stmts = self.current_block.body
build_tuple_asgn = self.current_block.body[-1]
# move build list to last issued statement
stmts.append(stmts.pop(stmts.index(build_empty_list)))
# fix the build list
build_tuple = build_tuple_asgn.value
build_list = build_empty_list.value
build_list.items = build_tuple.items
else:
# it's just a list extend with no static init, let it be
extendattr = ir.Expr.getattr(target, 'extend', loc=self.loc)
self.store(value=extendattr, name=extendvar)
extendinst = ir.Expr.call(self.get(extendvar), (value,), (),
loc=self.loc)
self.store(value=extendinst, name=res)
def op_MAP_ADD(self, inst, target, key, value, setitemvar, res):
target = self.get(target)
key = self.get(key)
value = self.get(value)
setitemattr = ir.Expr.getattr(target, '__setitem__', loc=self.loc)
self.store(value=setitemattr, name=setitemvar)
appendinst = ir.Expr.call(self.get(setitemvar), (key, value,), (),
loc=self.loc)
self.store(value=appendinst, name=res)
if PYVERSION in ((3, 14), ):
# Removed in 3.14
pass
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
def op_LOAD_ASSERTION_ERROR(self, inst, res):
gv_fn = ir.Global("AssertionError", AssertionError, loc=self.loc)
self.store(value=gv_fn, name=res)
else:
raise NotImplementedError(PYVERSION)
# NOTE: The LOAD_METHOD opcode is implemented as a LOAD_ATTR for ease,
# however this means a new object (the bound-method instance) could be
# created. Conversely, using a pure LOAD_METHOD no intermediary is present
# and it is essentially like a pointer grab and forward to CALL_METHOD. The
# net outcome is that the implementation in Numba produces the same result,
# but in object mode it may be that it runs more slowly than it would if
# run in CPython.
def op_LOAD_METHOD(self, *args, **kws):
self.op_LOAD_ATTR(*args, **kws)
def op_CALL_METHOD(self, *args, **kws):
self.op_CALL_FUNCTION(*args, **kws)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_CALL_INTRINSIC_1(self, inst, operand, **kwargs):
if operand == ci1op.INTRINSIC_STOPITERATION_ERROR:
stmt = ir.StaticRaise(INTRINSIC_STOPITERATION_ERROR, (),
self.loc)
self.current_block.append(stmt)
return
elif operand == ci1op.UNARY_POSITIVE:
self.op_UNARY_POSITIVE(inst, **kwargs)
return
elif operand == ci1op.INTRINSIC_LIST_TO_TUPLE:
self.op_LIST_TO_TUPLE(inst, **kwargs)
return
else:
raise NotImplementedError(operand)
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 14), ):
# New in 3.14, replaces BEFORE_WITH.
def op_LOAD_SPECIAL(self, inst, contextmanager, exit_method, block_end):
assert self.blocks[inst.offset] is self.current_block
# Python 3.12 hack for handling nested with blocks
if block_end > self.last_active_offset:
# Use exception entries to figure out end of syntax block
block_end = max([ex.end for ex in self.active_exception_entries
if ex.target == block_end])
# Handle with
wth = ir.With(inst.offset, exit=block_end)
self.syntax_blocks.append(wth)
ctxmgr = self.get(contextmanager)
self.current_block.append(
ir.EnterWith(contextmanager=ctxmgr,
begin=inst.offset,
end=block_end, loc=self.loc,))
# Store exit function
exit_fn_obj = ir.Const(None, loc=self.loc)
self.store(value=exit_fn_obj, name=exit_method)
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 14), ):
def op_LOAD_COMMON_CONSTANT(self, inst, res, idx):
if dis._common_constants[idx] == AssertionError:
gv_fn = ir.Global("AssertionError",
AssertionError,
loc=self.loc)
self.store(value=gv_fn, name=res)
else:
raise NotImplementedError
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
class INTRINSIC_STOPITERATION_ERROR(AssertionError):
pass
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
| Interpreter |
python | mozilla__bleach | bleach/css_sanitizer.py | {
"start": 1330,
"end": 2526
} | class ____:
def __init__(
self,
allowed_css_properties=ALLOWED_CSS_PROPERTIES,
allowed_svg_properties=ALLOWED_SVG_PROPERTIES,
):
self.allowed_css_properties = allowed_css_properties
self.allowed_svg_properties = allowed_svg_properties
def sanitize_css(self, style):
"""Sanitizes css in style tags"""
parsed = tinycss2.parse_declaration_list(style)
if not parsed:
return ""
new_tokens = []
for token in parsed:
if token.type == "declaration":
if (
token.lower_name in self.allowed_css_properties
or token.lower_name in self.allowed_svg_properties
):
new_tokens.append(token)
elif token.type in ("comment", "whitespace"):
if new_tokens and new_tokens[-1].type != token.type:
new_tokens.append(token)
# NOTE(willkg): We currently don't handle AtRule or ParseError and
# so both get silently thrown out
if not new_tokens:
return ""
return tinycss2.serialize(new_tokens).strip()
| CSSSanitizer |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_workers.py | {
"start": 34158,
"end": 35709
} | class ____:
async def test_read_work_pool(self, client, work_pool):
response = await client.get(f"/work_pools/{work_pool.name}")
assert response.status_code == status.HTTP_200_OK, response.text
result = parse_obj_as(WorkPool, response.json())
assert result.name == work_pool.name
assert result.id == work_pool.id
assert result.status == schemas.statuses.WorkPoolStatus.NOT_READY.value
async def test_read_invalid_config(self, client):
response = await client.get("/work_pools/does-not-exist")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
async def test_read_work_pool_that_fails_validation(
self,
client,
invalid_work_pool,
):
response = await client.get(f"/work_pools/{invalid_work_pool.name}")
assert response.status_code == 200, response.text
assert response.json()["id"] == str(invalid_work_pool.id)
assert response.json()["name"] == "wp-1"
async def test_read_work_pool_with_3_3_7_client_version_does_not_include_default_result_storage_block_id(
self, client: AsyncClient, work_pool: WorkPool
):
response = await client.get(
f"/work_pools/{work_pool.name}",
headers={"User-Agent": "prefect/3.3.7 (API 0.8.4)"},
)
assert response.status_code == 200
assert response.json()["storage_configuration"] == {
"bundle_upload_step": None,
"bundle_execution_step": None,
}
| TestReadWorkPool |
python | prompt-toolkit__python-prompt-toolkit | examples/prompts/multiline-autosuggest.py | {
"start": 1974,
"end": 2768
} | class ____(AutoSuggest):
def get_suggestion(self, buffer, document):
if document.line_count == 1:
return Suggestion(" (Add a few new lines to see multiline completion)")
cursor_line = document.cursor_position_row
text = document.text.split("\n")[cursor_line]
if not text.strip():
return None
index = None
for i, l in enumerate(universal_declaration_of_human_rights):
if l.startswith(text):
index = i
break
if index is None:
return None
return Suggestion(
universal_declaration_of_human_rights[index][len(text) :]
+ "\n"
+ "\n".join(universal_declaration_of_human_rights[index + 1 :])
)
| FakeLLMAutoSuggest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/log_manager.py | {
"start": 6380,
"end": 11471
} | class ____(logging.Handler):
"""Internal class used to turn regular logs into Dagster logs by adding Dagster-specific
metadata (such as job_name or step_key), as well as reformatting the underlying message.
Note: The `loggers` argument will be populated with the set of @loggers supplied to the current
run. These essentially work as handlers (they do not create their own log messages, they simply
re-log messages that are created from context.log.x() calls), which is why they are referenced
from within this handler class.
"""
def __init__(
self,
metadata: DagsterLogHandlerMetadata,
loggers: Sequence[logging.Logger],
handlers: Sequence[logging.Handler],
):
self._metadata = metadata
self._loggers = loggers
self._handlers = handlers
# Setting up a local thread context here to allow the DagsterLogHandler
# to be used in multi threading environments where the handler is called by
# different threads with different log messages in parallel.
self._local_thread_context = threading.local()
self._local_thread_context.should_capture = True
super().__init__()
@property
def metadata(self) -> DagsterLogHandlerMetadata:
return self._metadata
def with_tags(self, **new_tags: str) -> "DagsterLogHandler":
return DagsterLogHandler(
metadata={**self._metadata, **cast("DagsterLogHandlerMetadata", new_tags)},
loggers=self._loggers,
handlers=self._handlers,
)
def _extract_extra(self, record: logging.LogRecord) -> Mapping[str, Any]:
"""In the logging.Logger log() implementation, the elements of the `extra` dictionary
argument are smashed into the __dict__ of the underlying logging.LogRecord.
This function figures out what the original `extra` values of the log call were by
comparing the set of attributes in the received record to those of a default record.
"""
ref_attrs = list(logging.makeLogRecord({}).__dict__.keys()) + [
"message",
"asctime",
]
return {k: v for k, v in record.__dict__.items() if k not in ref_attrs}
def _convert_record(self, record: logging.LogRecord) -> logging.LogRecord:
# If this was a logged DagsterEvent, the event will be stored on the record
event = get_log_record_event(record) if has_log_record_event(record) else None
event_batch_metadata = (
get_log_record_event_batch_metadata(record)
if has_log_record_event_batch_metadata(record)
else None
)
metadata = construct_log_record_metadata(
self._metadata, record.getMessage(), event, event_batch_metadata
)
message = construct_log_record_message(metadata)
# update the message to be formatted like other dagster logs
set_log_record_metadata(record, metadata)
record.msg = message
record.args = ()
return record
def filter(self, record: logging.LogRecord) -> bool:
"""If you list multiple levels of a python logging hierarchy as managed loggers, and do not
set the propagate attribute to False, this will result in that record getting logged
multiple times, as the DagsterLogHandler will be invoked at each level of the hierarchy as
the message is propagated. This filter prevents this from happening.
"""
if not hasattr(self._local_thread_context, "should_capture"):
# Since only the "main" thread gets an initialized
# "_local_thread_context.should_capture" variable through the __init__()
# we need to set a default value for all other threads here.
self._local_thread_context.should_capture = True
return self._local_thread_context.should_capture and not has_log_record_metadata(record)
def emit(self, record: logging.LogRecord) -> None:
"""For any received record, add Dagster metadata, and have handlers handle it."""
try:
# to prevent the potential for infinite loops in which a handler produces log messages
# which are then captured and then handled by that same handler (etc.), do not capture
# any log messages while one is currently being emitted
self._local_thread_context.should_capture = False
dagster_record = self._convert_record(record)
# built-in handlers
for handler in self._handlers:
if dagster_record.levelno >= handler.level:
handler.handle(dagster_record)
# user-defined @loggers
for logger in self._loggers:
logger.log(
dagster_record.levelno,
dagster_record.msg,
exc_info=dagster_record.exc_info,
extra=self._extract_extra(record),
)
finally:
self._local_thread_context.should_capture = True
@public
| DagsterLogHandler |
python | openai__gym | gym/vector/async_vector_env.py | {
"start": 698,
"end": 828
} | class ____(Enum):
DEFAULT = "default"
WAITING_RESET = "reset"
WAITING_STEP = "step"
WAITING_CALL = "call"
| AsyncState |
python | django__django | tests/fixtures_regress/models.py | {
"start": 402,
"end": 615
} | class ____(models.Model):
name = models.CharField(max_length=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
| Plant |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 39497,
"end": 42955
} | class ____(AsyncHTTPTestCase):
# 50 characters long, and repetitive so it can be compressed.
BODY = b"01234567890123456789012345678901234567890123456789"
CHUNK_SIZE = 16
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient()
def get_httpserver_options(self):
return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def headers_received(self, start_line, headers):
self.chunk_lengths = [] # type: List[int]
def data_received(self, chunk):
self.chunk_lengths.append(len(chunk))
def finish(self):
response_body = utf8(json_encode(self.chunk_lengths))
self.connection.write_headers(
ResponseStartLine("HTTP/1.1", 200, "OK"),
HTTPHeaders({"Content-Length": str(len(response_body))}),
)
self.connection.write(response_body)
self.connection.finish()
def get_app(self):
class App(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return StreamingChunkSizeTest.MessageDelegate(request_conn)
return App()
def fetch_chunk_sizes(self, **kwargs):
response = self.fetch("/", method="POST", **kwargs)
response.rethrow()
chunks = json_decode(response.body)
self.assertEqual(len(self.BODY), sum(chunks))
for chunk_size in chunks:
self.assertLessEqual(
chunk_size, self.CHUNK_SIZE, "oversized chunk: " + str(chunks)
)
self.assertGreater(chunk_size, 0, "empty chunk: " + str(chunks))
return chunks
def compress(self, body):
bytesio = BytesIO()
gzfile = gzip.GzipFile(mode="w", fileobj=bytesio)
gzfile.write(body)
gzfile.close()
compressed = bytesio.getvalue()
if len(compressed) >= len(body):
raise Exception("body did not shrink when compressed")
return compressed
def test_regular_body(self):
chunks = self.fetch_chunk_sizes(body=self.BODY)
# Without compression we know exactly what to expect.
self.assertEqual([16, 16, 16, 2], chunks)
def test_compressed_body(self):
self.fetch_chunk_sizes(
body=self.compress(self.BODY), headers={"Content-Encoding": "gzip"}
)
# Compression creates irregular boundaries so the assertions
# in fetch_chunk_sizes are as specific as we can get.
def test_chunked_body(self):
def body_producer(write):
write(self.BODY[:20])
write(self.BODY[20:])
chunks = self.fetch_chunk_sizes(body_producer=body_producer)
# HTTP chunk boundaries translate to application-visible breaks
self.assertEqual([16, 4, 16, 14], chunks)
def test_chunked_compressed(self):
compressed = self.compress(self.BODY)
self.assertGreater(len(compressed), 20)
def body_producer(write):
write(compressed[:20])
write(compressed[20:])
self.fetch_chunk_sizes(
body_producer=body_producer, headers={"Content-Encoding": "gzip"}
)
| StreamingChunkSizeTest |
python | modin-project__modin | modin/core/storage_formats/pandas/query_compiler_caster.py | {
"start": 5087,
"end": 44724
} | class ____(ABC):
"""Cast all query compiler arguments of the member function to current query compiler."""
@classmethod
def __init_subclass__(
cls,
**kwargs: Dict,
) -> None:
"""
Apply type casting to all children of ``QueryCompilerCaster``.
This method is called automatically when a class inherits from
``QueryCompilerCaster``. It ensures that all member functions within the
subclass have their arguments automatically casted to the current query
compiler type.
Parameters
----------
**kwargs : Additional keyword arguments
"""
super().__init_subclass__(**kwargs)
apply_argument_cast_to_class(cls)
@abstractmethod
def _get_query_compiler(self) -> Optional[BaseQueryCompiler]:
"""
Get the query compiler storing data for this object.
Returns
-------
Optional[BaseQueryCompiler]
The query compiler storing data for this object, if it exists.
Otherwise, None.
"""
pass
@abstractmethod
def is_backend_pinned(self) -> bool:
"""
Get whether this object's data is pinned to a particular backend.
Returns
-------
bool
True if the data is pinned.
"""
pass
@abstractmethod
def _set_backend_pinned(self, pinned: bool, inplace: bool) -> Optional[Self]:
"""
Update whether this object's data is pinned to a particular backend.
Parameters
----------
pinned : bool
Whether the data is pinned.
inplace : bool, default: False
Whether to update the object in place.
Returns
-------
Optional[Self]
The object with the new pin state, if `inplace` is False. Otherwise, None.
"""
pass
def pin_backend(self, inplace: bool = False) -> Optional[Self]:
"""
Pin the object's underlying data, preventing Modin from automatically moving it to another backend.
Parameters
----------
inplace : bool, default: False
Whether to update the object in place.
Returns
-------
Optional[Self]
The newly-pinned object, if `inplace` is False. Otherwise, None.
"""
return self._set_backend_pinned(True, inplace)
def unpin_backend(self, inplace: bool = False) -> Optional[Self]:
"""
Unpin the object's underlying data, allowing Modin to automatically move it to another backend.
Parameters
----------
inplace : bool, default: False
Whether to update the object in place.
Returns
-------
Optional[Self]
The newly-unpinned object, if `inplace` is False. Otherwise, None.
"""
return self._set_backend_pinned(False, inplace)
@abstractmethod
def get_backend(self) -> str:
"""
Get the backend of this object.
Returns
-------
str
The backend of this object. The backend name must be title-cased.
"""
pass
@abstractmethod
def set_backend(
self,
backend: str,
inplace: bool = False,
*,
switch_operation: Optional[str] = None,
) -> Optional[Self]:
"""
Set the backend of this object.
Parameters
----------
backend : str
The new backend.
inplace : bool, default: False
Whether to update the object in place.
switch_operation : Optional[str], default: None
The name of the operation that triggered the set_backend call.
Internal argument used for displaying progress bar information.
Returns
-------
Optional[Self]
The object with the new backend, if `inplace` is False. Otherwise, None.
"""
pass
@_inherit_docstrings(set_backend)
def move_to(
self,
backend: str,
inplace: bool = False,
*,
switch_operation: Optional[str] = None,
) -> Optional[Self]:
return self.set_backend(
backend=backend, inplace=inplace, switch_operation=switch_operation
)
@abstractmethod
def _copy_into(self, other: Self) -> None:
"""
Copy the data from this object into another object of the same type.
Parameters
----------
other : Self
The object to copy data into.
"""
pass
@disable_logging
def _get_extension(self, name: str, extensions: EXTENSION_DICT_TYPE) -> Any:
"""
Get an extension with the given name from the given set of extensions.
Parameters
----------
name : str
The name of the extension.
extensions : EXTENSION_DICT_TYPE
The set of extensions.
Returns
-------
Any
The extension with the given name, or `sentinel` if the extension is not found.
"""
if self._get_query_compiler() is not None:
extensions_for_backend = extensions[self.get_backend()]
if name in extensions_for_backend:
return extensions_for_backend[name]
if name in extensions[None]:
return extensions[None][name]
return sentinel
@disable_logging
def _getattribute__from_extension_impl(
self, item: str, extensions: EXTENSION_DICT_TYPE
):
"""
__getatttribute__() an extension with the given name from the given set of extensions.
Implement __getattribute__() for extensions. Python calls
__getattribute_() every time you access an attribute of an object.
Parameters
----------
item : str
The name of the attribute to get.
extensions : EXTENSION_DICT_TYPE
The set of extensions.
Returns
-------
Any
The attribute from the extension, or `sentinel` if the attribute is
not found.
"""
# An extension property is only accessible if the backend supports it.
extension = self._get_extension(item, extensions)
if (
extension is not sentinel
# We should implement callable extensions by wrapping them in
# methods that dispatch to the corrrect backend. We should get the
# wrapped method with the usual object.__getattribute__() method
# lookup rather than by getting a particular extension when we call
# __getattribute__(). For example, if we've extended sort_values(),
# then __getattribute__('sort_values') should return a wrapper that
# calls the correct extension once it's invoked.
and not callable(extension)
):
return (
extension.__get__(self) if hasattr(extension, "__get__") else extension
)
return sentinel
@disable_logging
def _getattr__from_extension_impl(
self,
key: str,
default_behavior_attributes: set[str],
extensions: EXTENSION_DICT_TYPE,
) -> Any:
"""
Implement __getattr__, which the python interpreter falls back to if __getattribute__ raises AttributeError.
We override this method to make sure we try to get the extension
attribute for `key`, even if this class has a different
attribute for `key`.
Parameters
----------
key : str
Attribute name.
default_behavior_attributes : set[str]
The set of attributes for which we should follow the default
__getattr__ behavior and not try to get the extension.
extensions : EXTENSION_DICT_TYPE
The set of extensions.
Returns
-------
The value of the attribute.
"""
if key not in default_behavior_attributes:
# If this class has a an extension for `key`, but __getattribute__()
# for the extension raises an AttributeError, we end up in this
# method, which should try getting the extension again (and
# probably raise the AttributeError that
# _getattribute__from_extension_impl() originally raised), rather
# than following back to object.__getattribute__().
extensions_result = self._getattribute__from_extension_impl(key, extensions)
# If extensions_result is not `sentinel`, __getattribute__() should have
# returned it first.
ErrorMessage.catch_bugs_and_request_email(
failure_condition=extensions_result is not sentinel,
extra_log=(
"This object should return extensions via "
+ "__getattribute__ rather than __getattr__"
),
)
return object.__getattribute__(self, key)
def visit_nested_args(arguments, fn: callable):
"""
Visit each argument recursively, calling fn on each one.
Parameters
----------
arguments : tuple or dict
fn : Callable to apply to matching arguments
Returns
-------
tuple or dict
Returns args and kwargs with all query compilers casted to current_qc.
"""
if isinstance(arguments, pandas.NamedAgg):
# NamedAgg needs special treatment because it's an immutable subclass
# of tuple that can't be constructed from another tuple.
return pandas.NamedAgg(
column=fn(arguments.column), aggfunc=fn(arguments.aggfunc)
)
immutable_types = (FrozenList, tuple, ValuesView)
if isinstance(arguments, immutable_types):
args_type = type(arguments)
return (
# ValuesView, which we might get from dict.values(), is immutable,
# but not constructable, so we convert it to a tuple. Otherwise,
# we return an object of the same type as the input.
tuple
if issubclass(args_type, ValuesView)
else args_type
)(visit_nested_args(list(arguments), fn))
types_to_recursively_visit = (list, dict, *immutable_types)
if isinstance(
arguments,
list,
):
for i in range(len(arguments)):
if isinstance(arguments[i], types_to_recursively_visit):
visit_nested_args(arguments[i], fn)
else:
arguments[i] = fn(arguments[i])
elif isinstance(arguments, dict):
for key in arguments:
if isinstance(arguments[key], types_to_recursively_visit):
visit_nested_args(arguments[key], fn)
else:
arguments[key] = fn(arguments[key])
return arguments
def _assert_casting_functions_wrap_same_implementation(
m1: callable, m2: callable
) -> None:
"""
Assert that two casting wrappers wrap the same implementation.
Parameters
----------
m1 : callable
The first casting wrapper.
m2 : callable
The second casting wrapper.
Raises
------
AssertionError
If the two casting wrappers wrap different implementations.
"""
assert (
# For cases like (m1=Series.agg, m2=Series.aggregate), where Series
# defines its own method and aliases it, the two wrapped methods
# are the same.
m2._wrapped_method_for_casting is m1._wrapped_method_for_casting
# For cases like (m1=Series.kurt, m2=Series.kurtosis), where Series
# inherits both kurt and kurtosis from BasePandasDataset but does
# not define its own implementation of either,
# Series.kurt._wrapped_method_for_casting points to
# BasePandasDataset.kurt, which is not the same as
# BasePandasDataset.kurtosis. In that case, we need to go one level
# deeper to compare the wrapped methods of the two aliases of
# BasePandasDataset.
or m2._wrapped_method_for_casting._wrapped_method_for_casting
is m1._wrapped_method_for_casting._wrapped_method_for_casting
)
def apply_argument_cast_to_class(klass: type) -> type:
    """
    Apply argument casting to all functions in a class.

    Every plain function, classmethod, and staticmethod on `klass` (except
    the non-extendable attributes) is replaced with a wrapper that dispatches
    to the correct per-backend extension implementation and casts castable
    arguments to a common backend.

    Parameters
    ----------
    klass : type
        The class to apply argument casting to.

    Returns
    -------
    type
        The class with argument casting applied to all functions.
    """
    all_attrs = dict(inspect.getmembers(klass))
    # This is required because inspect converts class methods to member functions
    current_class_attrs = vars(klass)
    for key in current_class_attrs:
        all_attrs[key] = current_class_attrs[key]
    for attr_name, attr_value in all_attrs.items():
        # Skip attributes that must never be wrapped, and anything that is
        # not a plain function, classmethod, or staticmethod (e.g. properties
        # or class-level data).
        if attr_name in _NON_EXTENDABLE_ATTRIBUTES or not isinstance(
            attr_value, (FunctionType, classmethod, staticmethod)
        ):
            continue
        # classmethod/staticmethod are descriptors; unwrap them via __func__
        # to get the underlying implementation function.
        implementation_function = (
            attr_value.__func__
            if isinstance(attr_value, (classmethod, staticmethod))
            else attr_value
        )
        if attr_name not in klass._extensions[None]:
            # Register the original implementation as the default
            # extension. We fall back to this implementation if the
            # object's backend does not have an implementation for this
            # method.
            klass._extensions[None][attr_name] = implementation_function
        casting_implementation = wrap_function_in_argument_caster(
            klass=klass,
            f=implementation_function,
            wrapping_function_type=(
                classmethod
                if isinstance(attr_value, classmethod)
                else (
                    staticmethod if isinstance(attr_value, staticmethod) else MethodType
                )
            ),
            extensions=klass._extensions,
            name=attr_name,
        )
        # Re-wrap the caster in the same descriptor type as the original
        # attribute so the method kind (class/static/instance) is preserved.
        wrapped = (
            classmethod(casting_implementation)
            if isinstance(attr_value, classmethod)
            else (
                staticmethod(casting_implementation)
                if isinstance(attr_value, staticmethod)
                else casting_implementation
            )
        )
        if attr_name not in klass.__dict__:
            # If this class's method comes from a superclass (i.e.
            # it's not in klass.__dict__), mark it so that
            # modin.utils._inherit_docstrings knows that the method
            # must get its docstrings from its superclass.
            wrapped._wrapped_superclass_method = attr_value
        setattr(klass, attr_name, wrapped)
    return klass
def _maybe_switch_backend_pre_op(
    function_name: str,
    input_qc: BaseQueryCompiler,
    class_of_wrapped_fn: Optional[str],
    arguments: MappingProxyType[str, Any],
) -> tuple[str, Callable[[Any], Any]]:
    """
    Possibly switch backend before a function.

    Parameters
    ----------
    function_name : str
        The name of the function.
    input_qc : BaseQueryCompiler
        The input query compiler.
    class_of_wrapped_fn : Optional[str]
        The name of the class that the function belongs to. `None` for functions
        in the modin.pandas module.
    arguments : MappingProxyType[str, Any]
        Mapping from operation argument names to their values.

    Returns
    -------
    Tuple[str, callable]
        A tuple of the new backend and a function that casts all castable arguments
        to the new query compiler type.
    """
    current_backend = input_qc.get_backend()
    # Only run the (potentially expensive) cost-based decision when this
    # function has been explicitly registered for pre-op switching on the
    # current backend.
    registered_methods = _CLASS_AND_BACKEND_TO_PRE_OP_SWITCH_METHODS[
        BackendAndClassName(
            backend=input_qc.get_backend(), class_name=class_of_wrapped_fn
        )
    ]
    if function_name in registered_methods:
        target_backend = _get_backend_for_auto_switch(
            input_qc=input_qc,
            class_of_wrapped_fn=class_of_wrapped_fn,
            function_name=function_name,
            arguments=arguments,
        )
    else:
        target_backend = current_backend

    def cast_to_qc(arg: Any) -> Any:
        # Only castable arguments that hold a query compiler and live on a
        # different backend need to be moved; everything else passes through.
        if (
            isinstance(arg, QueryCompilerCaster)
            and arg._get_query_compiler() is not None
            and arg.get_backend() != target_backend
        ):
            arg.set_backend(
                target_backend,
                inplace=True,
                switch_operation=f"{_normalize_class_name(class_of_wrapped_fn)}.{function_name}",
            )
        return arg

    return target_backend, cast_to_qc
def _maybe_switch_backend_post_op(
    result: Any,
    function_name: str,
    qc_list: list[BaseQueryCompiler],
    starting_backend: str,
    class_of_wrapped_fn: Optional[str],
    pin_backend: bool,
    arguments: MappingProxyType[str, Any],
) -> Any:
    """
    Possibly switch the backend of the result of a function.

    Use cost-based optimization to decide whether to move the function's
    result to another backend. The result is only moved when the function is
    registered for post-op switching, it returned a QueryCompilerCaster, and
    moving is estimated to be cheaper than staying.

    Parameters
    ----------
    result : Any
        The result of the function.
    function_name : str
        The name of the function.
    qc_list : list[BaseQueryCompiler]
        The list of query compilers that were arguments to the function.
    starting_backend : str
        The backend used to run the function.
    class_of_wrapped_fn : Optional[str]
        The name of the class that the function belongs to. `None` for functions
        in the modin.pandas module.
    pin_backend : bool
        Whether the result should have its backend pinned, and therefore not moved.
    arguments : MappingProxyType[str, Any]
        Mapping from operation argument names to their values.

    Returns
    -------
    Any
        The result of the function, possibly with its backend switched.
    """
    if pin_backend:
        # A pinned input pins the output too, and pinned data is never moved.
        if isinstance(result, QueryCompilerCaster):
            result.pin_backend(inplace=True)
        return result
    # Post-operation switching only applies to nullary and unary methods.
    if len(qc_list) > 1:
        return result
    backend_in_use = (
        qc_list[0].get_backend() if len(qc_list) == 1 else starting_backend
    )
    registered_methods = _CLASS_AND_BACKEND_TO_POST_OP_SWITCH_METHODS[
        BackendAndClassName(backend=backend_in_use, class_name=class_of_wrapped_fn)
    ]
    if function_name not in registered_methods:
        return result
    # If the operation did not return a query compiler, we can't switch the
    # backend of the result.
    if not isinstance(result, QueryCompilerCaster):
        return result
    result_qc = result._get_query_compiler()
    if result_qc is None:
        return result
    return result.move_to(
        _get_backend_for_auto_switch(
            input_qc=result_qc,
            class_of_wrapped_fn=class_of_wrapped_fn,
            function_name=function_name,
            arguments=arguments,
        ),
        switch_operation=f"{_normalize_class_name(class_of_wrapped_fn)}.{function_name}",
    )
def _get_backend_for_auto_switch(
    input_qc: BaseQueryCompiler,
    class_of_wrapped_fn: str,
    function_name: str,
    arguments: MappingProxyType[str, Any],
) -> str:
    """
    Get the best backend to switch to.

    Use cost-based optimization to determine whether to switch the backend of the
    arguments to a function. If the cost of switching is less than the cost of
    staying on the current backend, we switch. If there are multiple backends we
    can switch to, we choose the one that minimizes cost_to_move - cost_to_stay.

    Parameters
    ----------
    input_qc : BaseQueryCompiler
        The query compiler representing the starting backend.
    class_of_wrapped_fn : Optional[str]
        The name of the class that the function belongs to. `None` for functions
        in the modin.pandas module.
    function_name : str
        The name of the function.
    arguments : MappingProxyType[str, Any]
        Mapping from operation argument names to their values.

    Returns
    -------
    str
        The name of the best backend to switch to.
    """
    # TODO(https://github.com/modin-project/modin/issues/7503): Make costing
    # methods take backend instead of query compiler type so that we don't
    # have to use the dispatcher to figure out the appropriate type for each
    # backend.
    from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher

    # Random tag used to correlate all metrics emitted for this single
    # switching decision.
    # Does not need to be secure, should not use system entropy
    metrics_group = "%04x" % random.randrange(16**4)
    starting_backend = input_qc.get_backend()
    # Best (most negative) move-minus-stay delta found so far; `None` means no
    # candidate has beaten staying on the current backend yet.
    min_move_stay_delta = None
    best_backend = starting_backend
    stay_cost = input_qc.stay_cost(
        api_cls_name=class_of_wrapped_fn,
        operation=function_name,
        arguments=arguments,
    )
    # Upper-bound estimate of the data's shape; emitted for diagnostics only.
    data_max_shape = input_qc._max_shape()
    emit_metric(
        f"hybrid.auto.api.{class_of_wrapped_fn}.{function_name}.group.{metrics_group}",
        1,
    )
    emit_metric(
        f"hybrid.auto.current.{starting_backend}.group.{metrics_group}.stay_cost",
        stay_cost,
    )
    emit_metric(
        f"hybrid.auto.current.{starting_backend}.group.{metrics_group}.rows",
        data_max_shape[0],
    )
    emit_metric(
        f"hybrid.auto.current.{starting_backend}.group.{metrics_group}.cols",
        data_max_shape[1],
    )
    for backend in all_switchable_backends():
        if backend == starting_backend:
            continue
        move_to_class = FactoryDispatcher._get_prepared_factory_for_backend(
            backend=backend
        ).io_cls.query_compiler_cls
        # Cost of converting the data to the candidate backend...
        move_to_cost = input_qc.move_to_cost(
            move_to_class,
            api_cls_name=class_of_wrapped_fn,
            operation=function_name,
            arguments=arguments,
        )
        # ...plus the candidate backend's estimate of executing the operation
        # once the data has arrived.
        other_execute_cost = move_to_class.move_to_me_cost(
            input_qc,
            api_cls_name=class_of_wrapped_fn,
            operation=function_name,
            arguments=arguments,
        )
        # A `None` cost means the backend could not provide an estimate;
        # candidates with any unknown cost are skipped.
        if (
            move_to_cost is not None
            and stay_cost is not None
            and other_execute_cost is not None
        ):
            if stay_cost >= QCCoercionCost.COST_IMPOSSIBLE:
                # We cannot execute the workload on the current engine
                # disregard the move_to_cost and just consider whether
                # the other engine can execute the workload
                move_stay_delta = other_execute_cost - stay_cost
            else:
                # We can execute this workload if we need to, consider
                # move_to_cost/transfer time in our decision
                move_stay_delta = (move_to_cost + other_execute_cost) - stay_cost
            if move_stay_delta < 0 and (
                min_move_stay_delta is None or move_stay_delta < min_move_stay_delta
            ):
                min_move_stay_delta = move_stay_delta
                best_backend = backend
            emit_metric(
                f"hybrid.auto.candidate.{backend}.group.{metrics_group}.move_to_cost",
                move_to_cost,
            )
            emit_metric(
                f"hybrid.auto.candidate.{backend}.group.{metrics_group}.other_execute_cost",
                other_execute_cost,
            )
            emit_metric(
                f"hybrid.auto.candidate.{backend}.group.{metrics_group}.delta",
                move_stay_delta,
            )
            get_logger().info(
                f"After {_normalize_class_name(class_of_wrapped_fn)} function {function_name}, "
                + f"considered moving to backend {backend} with "
                + f"(transfer_cost {move_to_cost} + other_execution_cost {other_execute_cost}) "
                + f", stay_cost {stay_cost}, and move-stay delta "
                + f"{move_stay_delta}"
            )
    if best_backend == starting_backend:
        emit_metric(f"hybrid.auto.decision.{best_backend}.group.{metrics_group}", 0)
        get_logger().info(
            f"Chose not to switch backends after operation {function_name}"
        )
    else:
        emit_metric(f"hybrid.auto.decision.{best_backend}.group.{metrics_group}", 1)
        get_logger().info(f"Chose to move to backend {best_backend}")
    return best_backend
def _get_extension_for_method(
    name: str,
    extensions: EXTENSION_DICT_TYPE,
    backend: str,
    args: tuple,
    wrapping_function_type: Optional[
        Union[type[classmethod], type[staticmethod], type[MethodType]]
    ],
) -> callable:
    """
    Get the extension implementation for a method.

    Parameters
    ----------
    name : str
        The name of the method.
    extensions : EXTENSION_DICT_TYPE
        The extension dictionary for the modin-API-level object (e.g. class
        DataFrame or module modin.pandas) that the method belongs to.
    backend : str
        The backend to use for this method call.
    args : tuple
        The arguments to the method.
    wrapping_function_type : Union[type[classmethod], type[staticmethod], type[MethodType]]
        The type of the original function that `f` implements.
        - `None` means we are wrapping a free function, e.g. pd.concat()
        - `classmethod` means we are wrapping a classmethod.
        - `staticmethod` means we are wrapping a staticmethod.
        - `MethodType` means we are wrapping a regular method of a class.

    Returns
    -------
    callable
        The implementation of the method for the given backend.
    """
    # Prefer the backend-specific override; otherwise fall back to the
    # default implementation registered under the `None` key.
    backend_overrides = extensions[backend]
    if name in backend_overrides:
        return backend_overrides[name]
    default_implementations = extensions[None]
    if name in default_implementations:
        return default_implementations[name]
    # No implementation anywhere: raise an AttributeError resembling python's
    # own. When python invokes a method on an object, it passes the object as
    # the first positional argument.
    owner = (
        f"{(type(args[0]).__name__)} object"
        if wrapping_function_type is MethodType
        else "module 'modin.pandas'"
    )
    raise AttributeError(owner + f" has no attribute {name}")
def wrap_function_in_argument_caster(
    klass: Optional[type],
    f: callable,
    name: str,
    wrapping_function_type: Optional[
        Union[type[classmethod], type[staticmethod], type[MethodType]]
    ],
    extensions: EXTENSION_DICT_TYPE,
) -> callable:
    """
    Wrap a function so that it casts all castable arguments to a consistent query compiler, and uses the correct extension implementation for methods.

    Also propagates pin behavior across operations.

    Parameters
    ----------
    klass : Optional[type]
        Class of the function being wrapped. `None` if and only if
        `wrapping_function_type` is `None` (i.e. for free functions).
    f : callable
        The function to wrap.
    name : str
        The name of the function.
    wrapping_function_type : Optional[Union[type[classmethod], type[staticmethod], type[MethodType]]
        The type of the original function that `f` implements.
        - `None` means we are wrapping a free function, e.g. pd.concat()
        - `classmethod` means we are wrapping a classmethod.
        - `staticmethod` means we are wrapping a staticmethod.
        - `MethodType` means we are wrapping a regular method of a class.
    extensions : EXTENSION_DICT_TYPE
        The extension dictionary for the modin-API-level object that the
        function belongs to, mapping backend name (or `None` for the default)
        to {function name: implementation}.

    Returns
    -------
    callable
        The wrapped function.
    """

    @functools.wraps(f)
    def f_with_argument_casting(*args: Tuple, **kwargs: Dict) -> Any:
        """
        Add casting for query compiler arguments.

        Parameters
        ----------
        *args : tuple
            The function arguments.
        **kwargs : dict
            The function keyword arguments.

        Returns
        -------
        Any
        """
        if wrapping_function_type in (classmethod, staticmethod):
            # TODO: currently we don't support any kind of casting or extension
            # for classmethod or staticmethod.
            return f(*args, **kwargs)
        # f() may make in-place updates to some of its arguments. If we cast
        # an argument and then f() updates it in place, the updates will not
        # be reflected in the original object. As a fix, we keep track of all
        # the in-place updates that f() makes, and once f() is finished, we
        # copy the updates back into the original objects. The query compiler
        # interface is mostly immutable (the only exceptions being the mutable
        # index and column properties), so to check for an in-place update, we
        # check whether an input's query compiler has changed its identity.
        InplaceUpdateTracker = namedtuple(
            "InplaceUpdateTracker",
            ["input_castable", "original_query_compiler", "new_castable"],
        )
        inplace_update_trackers: list[InplaceUpdateTracker] = []
        # The function name and class name of the function are passed to the calculator as strings
        class_of_wrapped_fn = klass.__name__ if klass is not None else None
        input_query_compilers: list[BaseQueryCompiler] = []
        # Backend that at least one pinned input requires; `None` if no input
        # is pinned.
        pin_target_backend: Optional[str] = None
        input_backends: set[str] = set()

        def register_query_compilers(arg):
            # Collect the query compilers (and their backends / pin state)
            # from one argument, recursing is handled by visit_nested_args.
            nonlocal pin_target_backend
            if (
                isinstance(arg, QueryCompilerCaster)
                and (qc := arg._get_query_compiler()) is not None
            ):
                arg_backend = arg.get_backend()
                input_backends.add(arg_backend)
                if pin_target_backend is not None:
                    # A pin target already exists; another pinned argument on
                    # a different backend is an unresolvable conflict.
                    if arg.is_backend_pinned() and arg_backend != pin_target_backend:
                        raise ValueError(
                            f"Cannot combine arguments that are pinned to conflicting backends ({pin_target_backend}, {arg_backend})"
                        )
                elif arg.is_backend_pinned():
                    pin_target_backend = arg_backend
                input_query_compilers.append(qc)
            elif isinstance(arg, BaseQueryCompiler):
                # We might get query compiler arguments in __init__()
                input_query_compilers.append(arg)
            return arg

        visit_nested_args(args, register_query_compilers)
        visit_nested_args(kwargs, register_query_compilers)
        # Before determining any automatic switches, we perform the following checks:
        # 1. If the global AutoSwitchBackend configuration variable is set to False, do not switch.
        # 2. If there's only one query compiler and it's pinned, do not switch.
        # 3. If there are multiple query compilers, and at least one is pinned to a particular
        #    backend, then switch to that backend.
        # 4. If there are multiple query compilers, at least two of which are pinned to distinct
        #    backends, raise a ValueError.
        if len(input_query_compilers) == 0:
            input_backend = Backend.get()
            # For nullary functions, we need to create a dummy query compiler
            # to calculate the cost of switching backends. We should only
            # create the dummy query compiler once per backend.
            input_qc_for_pre_op_switch = _BACKEND_TO_EMPTY_QC[input_backend]
        else:
            input_qc_for_pre_op_switch = input_query_compilers[0]
            input_backend = input_qc_for_pre_op_switch.get_backend()
        # Skip the casting code if there are < 2 input backends and either
        # auto-switching is disabled or the inputs are pinned to the input
        # backend.
        if len(input_backends) < 2 and (
            not AutoSwitchBackend.get() or pin_target_backend is not None
        ):
            f_to_apply = _get_extension_for_method(
                name=name,
                extensions=extensions,
                backend=(
                    pin_target_backend
                    if pin_target_backend is not None
                    else input_backend
                ),
                args=args,
                wrapping_function_type=wrapping_function_type,
            )
            result = f_to_apply(*args, **kwargs)
            # Propagate the pin from the inputs to the result.
            if (
                isinstance(result, QueryCompilerCaster)
                and pin_target_backend is not None
            ):
                result._set_backend_pinned(True, inplace=True)
            return result
        # Bind the arguments using the function implementation for the input
        # backend. TODO(https://github.com/modin-project/modin/issues/7525):
        # Ideally every implementation would have the same signature.
        bound_arguments = inspect.signature(
            _get_extension_for_method(
                name=name,
                extensions=extensions,
                backend=input_backend,
                args=args,
                wrapping_function_type=wrapping_function_type,
            ),
        ).bind(*args, **kwargs)
        bound_arguments.apply_defaults()
        # Read-only view of the bound arguments, passed to the cost
        # calculators and switch decisions below.
        args_dict = MappingProxyType(bound_arguments.arguments)
        if len(input_query_compilers) < 2:
            # No need to check should_pin_result() again, since we have already done so above.
            result_backend, cast_to_qc = _maybe_switch_backend_pre_op(
                name,
                input_qc=input_qc_for_pre_op_switch,
                class_of_wrapped_fn=class_of_wrapped_fn,
                arguments=args_dict,
            )
        else:
            preop_switch = (
                name
                in _CLASS_AND_BACKEND_TO_PRE_OP_SWITCH_METHODS[
                    BackendAndClassName(
                        backend=input_backend,
                        class_name=class_of_wrapped_fn,
                    )
                ]
            )
            # Multiple input query compilers: use the calculator to pick the
            # single backend all inputs should be cast to.
            calculator: BackendCostCalculator = BackendCostCalculator(
                operation_arguments=args_dict,
                api_cls_name=class_of_wrapped_fn,
                operation=name,
                query_compilers=input_query_compilers,
                preop_switch=preop_switch,
            )
            if pin_target_backend is None:
                result_backend = calculator.calculate()
            else:
                # A pinned input dictates the backend regardless of cost.
                result_backend = pin_target_backend

            def cast_to_qc(arg):
                if not (
                    isinstance(arg, QueryCompilerCaster)
                    and arg._get_query_compiler() is not None
                    and arg.get_backend() != result_backend
                ):
                    return arg
                if BackendMergeCastInPlace.get():
                    arg.set_backend(
                        result_backend,
                        switch_operation=f"{_normalize_class_name(class_of_wrapped_fn)}.{name}",
                        inplace=True,
                    )
                    assert arg.get_backend() == result_backend
                    cast = arg
                else:
                    cast = arg.set_backend(
                        result_backend,
                        switch_operation=f"{_normalize_class_name(class_of_wrapped_fn)}.{name}",
                        inplace=False,
                    )
                # Remember the original object so any in-place update f()
                # makes to the cast copy can be written back afterwards.
                inplace_update_trackers.append(
                    InplaceUpdateTracker(
                        input_castable=arg,
                        original_query_compiler=cast._get_query_compiler(),
                        new_castable=cast,
                    )
                )
                return cast

        args = visit_nested_args(args, cast_to_qc)
        kwargs = visit_nested_args(kwargs, cast_to_qc)
        # `result_backend` may be different from `input_backend`, so we have to
        # look up the correct implementation based on `result_backend`.
        f_to_apply = _get_extension_for_method(
            name=name,
            extensions=extensions,
            backend=result_backend,
            args=args,
            wrapping_function_type=wrapping_function_type,
        )
        # We have to set the global Backend correctly for I/O methods like
        # read_json() to use the correct backend.
        with config_context(Backend=result_backend):
            result = f_to_apply(*args, **kwargs)
        # Copy any in-place updates f() made to cast copies back into the
        # original input objects (see the comment on InplaceUpdateTracker).
        for (
            original_castable,
            original_qc,
            new_castable,
        ) in inplace_update_trackers:
            new_qc = new_castable._get_query_compiler()
            if BackendMergeCastInPlace.get() or original_qc is not new_qc:
                new_castable._copy_into(original_castable)
        return _maybe_switch_backend_post_op(
            result,
            function_name=name,
            qc_list=input_query_compilers,
            starting_backend=result_backend,
            class_of_wrapped_fn=class_of_wrapped_fn,
            pin_backend=pin_target_backend is not None,
            arguments=args_dict,
        )

    # Mark the wrapper with its implementation so that
    # _assert_casting_functions_wrap_same_implementation can compare aliases.
    f_with_argument_casting._wrapped_method_for_casting = f
    return f_with_argument_casting
# Extension registry for free functions in the modin.pandas namespace (e.g.
# pd.concat()): maps backend name (or `None` for the defaults) to
# {function name: implementation}.
_GENERAL_EXTENSIONS: EXTENSION_DICT_TYPE = defaultdict(dict)
def wrap_free_function_in_argument_caster(name: str) -> callable:
    """
    Get a wrapper for a free function that casts all castable arguments to a consistent query compiler.

    Parameters
    ----------
    name : str
        The name of the function.

    Returns
    -------
    callable
        A wrapper for a free function that casts all castable arguments to a consistent query compiler.
    """

    def decorator(f):
        # The first implementation registered under this name becomes the
        # default extension, used when a backend has no specific override.
        _GENERAL_EXTENSIONS[None].setdefault(name, f)
        return wrap_function_in_argument_caster(
            klass=None,
            f=f,
            wrapping_function_type=None,
            extensions=_GENERAL_EXTENSIONS,
            name=name,
        )

    return decorator
def register_function_for_post_op_switch(
    class_name: Optional[str], backend: str, method: str
) -> None:
    """
    Register a function for post-operation backend switch.

    Parameters
    ----------
    class_name : Optional[str]
        The name of the class that the function belongs to. `None` for functions
        in the modin.pandas module.
    backend : str
        Only consider switching when the starting backend is this one.
    method : str
        The name of the method to register.
    """
    registry_key = BackendAndClassName(backend=backend, class_name=class_name)
    _CLASS_AND_BACKEND_TO_POST_OP_SWITCH_METHODS[registry_key].add(method)
def register_function_for_pre_op_switch(
    class_name: Optional[str], backend: str, method: str
) -> None:
    """
    Register a function for pre-operation backend switch.

    Parameters
    ----------
    class_name : Optional[str]
        The name of the class that the function belongs to. `None` for functions
        in the modin.pandas module.
    backend : str
        Only consider switching when the starting backend is this one.
    method : str
        The name of the method to register.
    """
    registry_key = BackendAndClassName(backend=backend, class_name=class_name)
    _CLASS_AND_BACKEND_TO_PRE_OP_SWITCH_METHODS[registry_key].add(method)
| QueryCompilerCaster |
python | pytransitions__transitions | transitions/extensions/locking.py | {
"start": 1977,
"end": 2734
} | class ____(Event):
"""An event type which uses the parent's machine context map when triggered."""
def trigger(self, model, *args, **kwargs):
"""Extends transitions.core.Event.trigger by using locks/machine contexts."""
# pylint: disable=protected-access
# noinspection PyProtectedMember
# LockedMachine._locked should not be called somewhere else. That's why it should not be exposed
# to Machine users.
if self.machine._ident.current != get_ident():
with nested(*self.machine.model_context_map[id(model)]):
return super(LockedEvent, self).trigger(model, *args, **kwargs)
else:
return super(LockedEvent, self).trigger(model, *args, **kwargs)
| LockedEvent |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/patch_a_dependency/package.py | {
"start": 217,
"end": 540
} | class ____(Package):
"""Package that requries a patched version of a dependency."""
homepage = "http://www.example.com"
url = "http://www.example.com/patch-a-dependency-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("libelf", patches=patch("libelf.patch"))
| PatchADependency |
python | realpython__materials | python-built-in-functions/processors.py | {
"start": 25,
"end": 522
} | class ____:
def __init__(self, filename):
self.filename = filename
def read(self):
with open(self.filename, encoding="utf-8", newline="") as file:
return list(csv.DictReader(file))
def write(self, data):
with open(
self.filename, mode="w", encoding="utf-8", newline=""
) as file:
writer = csv.DictWriter(file, fieldnames=data[0].keys())
writer.writeheader()
writer.writerows(data)
| CSVProcessor |
python | requests__requests-oauthlib | tests/test_compliance_fixes.py | {
"start": 12731,
"end": 14298
} | class ____(TestCase):
value_to_test_for = "value_to_test_for"
def setUp(self):
mocker = requests_mock.Mocker()
mocker.post(
"https://example.com/token",
request_headers={"X-Client-Secret": self.value_to_test_for},
json={
"access_token": "this is the access token",
"expires_in": 7200,
"token_type": "Bearer",
},
headers={"Content-Type": "application/json"},
)
mocker.post(
"https://example.com/refresh",
request_headers={"X-Client-Secret": self.value_to_test_for},
json={
"access_token": "this is the access token",
"expires_in": 7200,
"token_type": "Bearer",
},
headers={"Content-Type": "application/json"},
)
mocker.start()
self.addCleanup(mocker.stop)
session = OAuth2Session()
self.fixed_session = access_and_refresh_token_request_compliance_fix_test(
session, self.value_to_test_for
)
def test_access_token(self):
token = self.fixed_session.fetch_token(
"https://example.com/token",
authorization_response="https://i.b/?code=hello",
)
assert token["token_type"] == "Bearer"
def test_refresh_token(self):
token = self.fixed_session.refresh_token(
"https://example.com/refresh",
)
assert token["token_type"] == "Bearer"
| RefreshTokenRequestComplianceFixTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 21004,
"end": 21651
} | class ____(GoogleAdsHttpRequester):
CURSOR_FIELD: str = "change_status.last_change_date_time"
def get_request_body_json(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
props = self.schema_loader.get_json_schema()[self.name]["properties"]
fields = [f for f in props.keys() if f not in (self.CURSOR_FIELD, "deleted_at")]
return {"query": f"SELECT {', '.join(fields)} FROM {self._parameters['resource_name']}"}
| CriterionFullRefreshRequester |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instigators.py | {
"start": 139,
"end": 276
} | class ____(graphene.Union):
class Meta:
types = (GrapheneSchedule, GrapheneSensor)
name = "Instigator"
| GrapheneInstigator |
python | huggingface__transformers | src/transformers/models/doge/modeling_doge.py | {
"start": 32433,
"end": 36864
} | class ____(DogePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = DogeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
output_router_logits: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, DogeForCausalLM
>>> model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-320M")
>>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-320M")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
math.floor(math.sqrt(self.num_experts)),
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
| DogeForCausalLM |
python | pytorch__pytorch | torch/testing/_internal/distributed/_tensor/common_dtensor.py | {
"start": 25204,
"end": 32810
} | class ____(DTensorTestBase):
@property
def is_local_tensor_enabled(self) -> bool:
return True
def _handle_test_skip(self, msg: str) -> None:
self.skipTest(msg)
def _get_local_tensor_mode(self):
return LocalTensorMode(frozenset(range(self.world_size)))
def setUp(self) -> None:
super().setUp()
torch.autograd._enable_record_function(False)
def tearDown(self) -> None:
from torch.distributed.tensor import _random as random
random._rng_tracker = None
super().tearDown()
torch.autograd._enable_record_function(True)
@property
def rank(self):
return torch.SymInt(LocalIntNode({r: r for r in range(self.world_size)}))
@rank.setter
def rank(self, rank):
pass
def join_or_run(self, fn):
@wraps(fn)
def wrapper(self):
fn()
return types.MethodType(wrapper, self)
def build_device_mesh(self) -> DeviceMesh:
with maybe_disable_local_tensor_mode():
return super().build_device_mesh()
def init_pg(self, eager_init, backend: Optional[str] = None) -> None:
dist.init_process_group("fake", rank=0, world_size=self.world_size)
self._pg = dist.distributed_c10d._get_default_group()
def destroy_pg(self, device_id: Optional[int] = None) -> None:
dist.destroy_process_group(self._pg)
self._pg = None
def _spawn_processes(self) -> None:
pass
def run_test(self, test_name: str, parent_pipe) -> None:
getattr(self, test_name)()
def init_manual_seed_for_rank(self) -> None:
torch.manual_seed(0)
def make_wrapped(fn, ctxs):
@functools.wraps(fn)
def wrapped(self):
torch._dynamo.reset()
stack = contextlib.ExitStack()
for ctx in ctxs:
if callable(ctx):
stack.enter_context(ctx(self))
else:
stack.enter_context(ctx)
try:
out = fn(self)
finally:
stack.close()
return out
return wrapped
def create_local_tensor_test_class(orig_cls, skipped_tests=None):
if skipped_tests is None:
skipped_tests = []
dct = orig_cls.__dict__.copy()
for name in list(dct.keys()):
fn = dct[name]
if not callable(fn):
continue
elif name in skipped_tests:
dct[name] = lambda self: self.skipTest("Skipped test")
elif name.startswith("test_"):
ctxs = [
lambda test: test._get_local_tensor_mode(),
]
dct[name] = make_wrapped(fn, ctxs)
cls = type(
orig_cls.__name__ + "WithLocalTensor",
(LocalDTensorTestBase,) + orig_cls.__bases__,
dct,
)
cls.__file__ = __file__
return cls
@maybe_run_for_local_tensor
def map_local_tensor_for_rank(tensor, rank, func):
return func(tensor, rank)
@maybe_run_for_local_tensor
def map_local_for_rank(rank, func):
return func(rank)
def reduce_local_int(val, func):
return func(val.node._local_ints)
def _convert_shard_order_dict_to_ShardOrder(shard_order):
"""Convert shard_order dict to ShardOrder"""
return tuple(
ShardOrderEntry(tensor_dim=tensor_dim, mesh_dims=tuple(mesh_dims))
for tensor_dim, mesh_dims in shard_order.items()
)
# TODO(zpcore): remove once the native redistribute supports shard_order arg
def redistribute(
dtensor_input,
device_mesh,
placements,
shard_order,
use_graph_based_transform=True,
):
"""
wrapper function to support shard_order for redistribution
This is a simpler version of Redistribute, only considers the forward.
"""
if placements is None:
placements = shard_order_to_placement(shard_order, device_mesh)
placements = tuple(placements)
old_spec = dtensor_input._spec
new_spec = copy.deepcopy(old_spec)
new_spec.placements = placements
if shard_order is not None:
new_spec.shard_order = shard_order
else:
new_spec.shard_order = ()
if old_spec == new_spec:
return dtensor_input
dtensor_input = DTensor.from_local(
redistribute_local_tensor(
dtensor_input.to_local(),
old_spec,
new_spec,
use_graph_based_transform=use_graph_based_transform,
),
device_mesh,
)
dtensor_input._spec = copy.deepcopy(new_spec)
return dtensor_input # returns DTensor
# TODO(zpcore): remove once the native distribute_tensor supports
# shard_order arg
def patched_distribute_tensor(
input_tensor,
device_mesh,
placements,
shard_order,
use_graph_based_transform=True,
):
"""wrapper function to support shard_order for tensor distribution"""
if placements is None:
placements = shard_order_to_placement(shard_order, device_mesh)
placements = tuple(placements)
tensor_dt = distribute_tensor(input_tensor, device_mesh, placements)
# fix the shard order
return redistribute(
tensor_dt, device_mesh, placements, shard_order, use_graph_based_transform
)
# TODO(zpcore): remove once the native redistribute supports shard_order arg
def make_full_tensor(dtensor_input):
"""wrapper function to support DTensor.full_tensor"""
return redistribute(
dtensor_input, dtensor_input.device_mesh, placements=None, shard_order=()
).to_local()
def shard_order_to_placement(shard_order, mesh):
"""convert shard_order to placement with only Replicate() and Shard()"""
placements: list[Any] = [Replicate() for _ in range(mesh.ndim)]
if shard_order is not None:
for entry in shard_order:
tensor_dim = entry.tensor_dim
mesh_dims = entry.mesh_dims
for mesh_dim in mesh_dims:
placements[mesh_dim] = Shard(tensor_dim)
return tuple(placements)
def generate_shard_orders(mesh, tensor_rank):
# Generate all possible sharding placement of tensor with rank
# `tensor_rank` over mesh.
def _split_list(lst: list, N: int):
def compositions(n: int, k: int):
# yields lists of length k, positive ints summing to n
for cuts in itertools.combinations(range(1, n), k - 1):
# add 0 and n as sentinels, then take consecutive differences
yield [b - a for a, b in itertools.pairwise((0, *cuts, n))]
length = len(lst)
for comp in compositions(length, N):
result = []
start = 0
for size in comp:
result.append(lst[start : start + size])
start += size
yield result
all_mesh = list(range(mesh.ndim))
all_device_order = list(itertools.permutations(all_mesh))
for device_order in all_device_order:
# split on device orders, and assign each device order segment to a tensor dim
for num_split in range(1, mesh.ndim + 1):
for splitted_list in _split_list(list(range(mesh.ndim)), num_split):
for tensor_dims in itertools.combinations(
range(tensor_rank), len(splitted_list)
):
shard_order = {}
assert len(tensor_dims) == len(splitted_list)
for tensor_dim, mesh_dims in zip(tensor_dims, splitted_list):
shard_order[tensor_dim] = device_order[
mesh_dims[0] : mesh_dims[-1] + 1
]
yield _convert_shard_order_dict_to_ShardOrder(shard_order)
| LocalDTensorTestBase |
python | pytorch__pytorch | test/distributed/_composable/test_checkpoint.py | {
"start": 2429,
"end": 2848
} | class ____(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.w = nn.Parameter(torch.randn((100, 100), device=device))
def forward(self, xs: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
assert len(xs) == 2, f"Expects 2 args but got {len(xs)}"
x, y = xs
z = x + y
z = z @ self.w
return nn.functional.relu(z)
| MultiInputModel |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/taskgroup.py | {
"start": 24751,
"end": 26878
} | class ____(TaskGroup):
"""
A mapped task group.
This doesn't really do anything special, just holds some additional metadata
for expansion later.
Don't instantiate this class directly; call *expand* or *expand_kwargs* on
a ``@task_group`` function instead.
"""
_expand_input: DictOfListsExpandInput | ListOfDictsExpandInput = attrs.field(alias="expand_input")
def __iter__(self):
for child in self.children.values():
if getattr(child, "trigger_rule", None) == TriggerRule.ALWAYS:
raise ValueError(
"Task-generated mapping within a mapped task group is not "
"allowed with trigger rule 'always'"
)
yield from self._iter_child(child)
@methodtools.lru_cache(maxsize=None)
def get_parse_time_mapped_ti_count(self) -> int:
"""
Return the Number of instances a task in this group should be mapped to, when a Dag run is created.
This only considers literal mapped arguments, and would return *None*
when any non-literal values are used for mapping.
If this group is inside mapped task groups, all the nested counts are
multiplied and accounted.
:meta private:
:raise NotFullyPopulated: If any non-literal mapped arguments are encountered.
:return: The total number of mapped instances each task should have.
"""
return functools.reduce(
operator.mul,
(g._expand_input.get_parse_time_mapped_ti_count() for g in self.iter_mapped_task_groups()),
)
def __exit__(self, exc_type, exc_val, exc_tb):
for op, _ in self._expand_input.iter_references():
self.set_upstream(op)
super().__exit__(exc_type, exc_val, exc_tb)
def iter_mapped_dependencies(self) -> Iterator[Operator]:
"""Upstream dependencies that provide XComs used by this mapped task group."""
from airflow.sdk.definitions.xcom_arg import XComArg
for op, _ in XComArg.iter_xcom_references(self._expand_input):
yield op
| MappedTaskGroup |
python | Textualize__textual | src/textual/demo/page.py | {
"start": 318,
"end": 1238
} | class ____(ModalScreen):
DEFAULT_CSS = """
CodeScreen {
#code {
border: heavy $accent;
margin: 2 4;
scrollbar-gutter: stable;
Static {
width: auto;
}
}
}
"""
BINDINGS = [("escape", "dismiss", "Dismiss code")]
def __init__(self, title: str, code: str) -> None:
super().__init__()
self.code = code
self.title = title
def compose(self) -> ComposeResult:
with ScrollableContainer(id="code"):
yield Static(
Syntax(
self.code, lexer="python", indent_guides=True, line_numbers=True
),
expand=True,
)
def on_mount(self):
code_widget = self.query_one("#code")
code_widget.border_title = self.title
code_widget.border_subtitle = "Escape to close"
| CodeScreen |
python | jina-ai__jina | jina/logging/formatter.py | {
"start": 87,
"end": 673
} | class ____(Formatter):
"""Remove all control chars from the log and format it as plain text, also restrict the max-length of msg to 512."""
def format(self, record):
"""
Format the LogRecord by removing all control chars and plain text, and restrict the max-length of msg to 512.
:param record: A LogRecord object.
:return:: Formatted plain LogRecord.
"""
cr = copy(record)
if isinstance(cr.msg, str):
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))[:512]
return super().format(cr)
| PlainFormatter |
python | numba__numba | numba/core/typing/templates.py | {
"start": 49513,
"end": 49841
} | class ____(BaseRegistryLoader):
"""
An incremental loader for a typing registry.
"""
registry_items = ('functions', 'attributes', 'globals')
builtin_registry = Registry()
infer = builtin_registry.register
infer_getattr = builtin_registry.register_attr
infer_global = builtin_registry.register_global
| RegistryLoader |
python | doocs__leetcode | solution/3600-3699/3618.Split Array by Prime Indices/Solution.py | {
"start": 174,
"end": 316
} | class ____:
def splitArray(self, nums: List[int]) -> int:
return abs(sum(x if primes[i] else -x for i, x in enumerate(nums)))
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/instigation.py | {
"start": 20796,
"end": 32404
} | class ____(
NamedTuple(
"_TickData",
[
("instigator_origin_id", str),
("instigator_name", str),
("instigator_type", InstigatorType),
("status", TickStatus),
("timestamp", float), # Time the tick started
("run_ids", Sequence[str]),
("run_keys", Sequence[str]),
("error", Optional[SerializableErrorInfo]),
("skip_reason", Optional[str]),
("cursor", Optional[str]),
("origin_run_ids", Sequence[str]),
("failure_count", int),
("selector_id", Optional[str]),
("log_key", Optional[list[str]]),
(
"dynamic_partitions_request_results",
Sequence[DynamicPartitionsRequestResult],
),
("end_timestamp", Optional[float]), # Time the tick finished
("run_requests", Optional[Sequence[RunRequest]]), # run requests created by the tick
("auto_materialize_evaluation_id", Optional[int]),
("reserved_run_ids", Optional[Sequence[str]]),
("consecutive_failure_count", int),
(
"user_interrupted",
bool,
), # indicates if a user stopped the tick while submitting runs
],
)
):
"""This class defines the data that is serialized and stored for each schedule/sensor tick. We
depend on the storage implementation to provide tick ids, and therefore separate all other
data into this serializable class that can be stored independently of the id.
Args:
instigator_origin_id (str): The id of the instigator target for this tick
instigator_name (str): The name of the instigator for this tick
instigator_type (InstigatorType): The type of this instigator for this tick
status (TickStatus): The status of the tick, which can be updated
timestamp (float): The timestamp at which this instigator evaluation started
run_id (str): The run created by the tick.
run_keys (Sequence[str]): Unique user-specified identifiers for the runs created by this
instigator.
error (SerializableErrorInfo): The error caught during execution. This is set only when
the status is ``TickStatus.Failure``
skip_reason (str): message for why the tick was skipped
cursor (Optional[str]): Cursor output by this tick.
origin_run_ids (List[str]): The runs originated from the schedule/sensor.
failure_count (int): The number of times this particular tick has failed (to determine
whether the next tick should be a retry of that tick).
For example, for a schedule, this tracks the number of attempts we have made for a
particular scheduled execution time. The next tick will attempt to retry the most recent
tick if it failed and its failure count is less than the configured retry limit.
dynamic_partitions_request_results (Sequence[DynamicPartitionsRequestResult]): The results
of the dynamic partitions requests evaluated within the tick.
end_timestamp (Optional[float]) Time that this tick finished.
run_requests (Optional[Sequence[RunRequest]]) The RunRequests that were requested by this
tick. Currently only used by the AUTO_MATERIALIZE type.
auto_materialize_evaluation_id (Optinoal[int]) For AUTO_MATERIALIZE ticks, the evaluation ID
that can be used to index into the asset_daemon_asset_evaluations table.
reserved_run_ids (Optional[Sequence[str]]): A list of run IDs to use for each of the
run_requests. Used to ensure that if the tick fails partway through, we don't create
any duplicate runs for the tick. Currently only used by AUTO_MATERIALIZE ticks.
consecutive_failure_count (Optional[int]): The number of times this instigator has failed
consecutively. Differs from failure_count in that it spans multiple executions, whereas
failure_count measures the number of times that a particular tick should retry. For
example, if a daily schedule fails on 3 consecutive days, failure_count tracks the
number of failures for each day, and consecutive_failure_count tracks the total
number of consecutive failures across all days.
"""
def __new__(
cls,
instigator_origin_id: str,
instigator_name: str,
instigator_type: InstigatorType,
status: TickStatus,
timestamp: float,
run_ids: Optional[Sequence[str]] = None,
run_keys: Optional[Sequence[str]] = None,
error: Optional[SerializableErrorInfo] = None,
skip_reason: Optional[str] = None,
cursor: Optional[str] = None,
origin_run_ids: Optional[Sequence[str]] = None,
failure_count: Optional[int] = None,
selector_id: Optional[str] = None,
log_key: Optional[list[str]] = None,
dynamic_partitions_request_results: Optional[
Sequence[DynamicPartitionsRequestResult]
] = None,
end_timestamp: Optional[float] = None,
run_requests: Optional[Sequence[RunRequest]] = None,
auto_materialize_evaluation_id: Optional[int] = None,
reserved_run_ids: Optional[Sequence[str]] = None,
consecutive_failure_count: Optional[int] = None,
user_interrupted: bool = False,
):
_validate_tick_args(instigator_type, status, run_ids, error, skip_reason)
check.opt_list_param(log_key, "log_key", of_type=str)
return super().__new__(
cls,
check.str_param(instigator_origin_id, "instigator_origin_id"),
check.str_param(instigator_name, "instigator_name"),
check.inst_param(instigator_type, "instigator_type", InstigatorType),
check.inst_param(status, "status", TickStatus),
check.float_param(timestamp, "timestamp"),
check.opt_sequence_param(run_ids, "run_ids", of_type=str),
check.opt_sequence_param(run_keys, "run_keys", of_type=str),
error, # validated in _validate_tick_args
skip_reason, # validated in _validate_tick_args
cursor=check.opt_str_param(cursor, "cursor"),
origin_run_ids=check.opt_sequence_param(origin_run_ids, "origin_run_ids", of_type=str),
failure_count=check.opt_int_param(failure_count, "failure_count", 0),
selector_id=check.opt_str_param(selector_id, "selector_id"),
log_key=log_key,
dynamic_partitions_request_results=check.opt_sequence_param(
dynamic_partitions_request_results,
"dynamic_partitions_request_results",
of_type=DynamicPartitionsRequestResult,
),
end_timestamp=end_timestamp,
run_requests=check.opt_sequence_param(run_requests, "run_requests"),
auto_materialize_evaluation_id=auto_materialize_evaluation_id,
reserved_run_ids=check.opt_sequence_param(reserved_run_ids, "reserved_run_ids"),
consecutive_failure_count=check.opt_int_param(
consecutive_failure_count, "consecutive_failure_count", 0
),
user_interrupted=user_interrupted,
)
def with_status(
self,
status: TickStatus,
**kwargs,
) -> "TickData":
return TickData(
**merge_dicts(
self._asdict(),
{
"status": status,
},
kwargs,
)
)
def with_run_info(
self, run_id: Optional[str] = None, run_key: Optional[str] = None
) -> "TickData":
check.opt_str_param(run_id, "run_id")
check.opt_str_param(run_key, "run_key")
return TickData(
**merge_dicts(
self._asdict(),
{
"run_ids": (
[*self.run_ids, run_id]
if (run_id and run_id not in self.run_ids)
else self.run_ids
),
"run_keys": (
[*self.run_keys, run_key]
if (run_key and run_key not in self.run_keys)
else self.run_keys
),
},
)
)
def with_run_requests(
self,
run_requests: Sequence[RunRequest],
reserved_run_ids: Optional[Sequence[str]] = None,
cursor: Optional[str] = None,
) -> "TickData":
return TickData(
**merge_dicts(
self._asdict(),
{
"run_requests": run_requests,
"reserved_run_ids": reserved_run_ids,
"cursor": cursor,
},
)
)
def with_reason(self, skip_reason: Optional[str]) -> "TickData":
return TickData(
**merge_dicts(
self._asdict(), {"skip_reason": check.opt_str_param(skip_reason, "skip_reason")}
)
)
def with_cursor(self, cursor: Optional[str]) -> "TickData":
return TickData(
**merge_dicts(self._asdict(), {"cursor": check.opt_str_param(cursor, "cursor")})
)
def with_origin_run(self, origin_run_id: str) -> "TickData":
check.str_param(origin_run_id, "origin_run_id")
return TickData(
**merge_dicts(
self._asdict(),
{"origin_run_ids": [*self.origin_run_ids, origin_run_id]},
)
)
def with_log_key(self, log_key: Sequence[str]) -> "TickData":
return TickData(
**merge_dicts(
self._asdict(),
{"log_key": check.list_param(log_key, "log_key", of_type=str)},
)
)
def with_dynamic_partitions_request_result(
self, dynamic_partitions_request_result: DynamicPartitionsRequestResult
):
return TickData(
**merge_dicts(
self._asdict(),
{
"dynamic_partitions_request_results": [
*self.dynamic_partitions_request_results,
dynamic_partitions_request_result,
]
},
)
)
def with_user_interrupted(self, user_interrupted: bool):
return TickData(
**merge_dicts(
self._asdict(),
{"user_interrupted": user_interrupted},
)
)
def _validate_tick_args(
instigator_type: InstigatorType,
status: TickStatus,
run_ids: Optional[Sequence[str]] = None,
error: Optional[SerializableErrorInfo] = None,
skip_reason: Optional[str] = None,
) -> None:
check.inst_param(instigator_type, "instigator_type", InstigatorType)
check.inst_param(status, "status", TickStatus)
if status == TickStatus.SUCCESS:
check.list_param(run_ids, "run_ids", of_type=str)
check.invariant(error is None, desc="Tick status is SUCCESS, but error was provided")
elif status == TickStatus.FAILURE:
check.inst_param(error, "error", SerializableErrorInfo)
else:
check.invariant(error is None, "Tick status was not FAILURE but error was provided")
if skip_reason:
check.invariant(
status == TickStatus.SKIPPED,
"Tick status was not SKIPPED but skip_reason was provided",
)
@dataclass
| TickData |
python | huggingface__transformers | src/transformers/models/ovis2/modular_ovis2.py | {
"start": 2832,
"end": 2893
} | class ____(Aimv2EncoderLayer):
pass
| Ovis2VisionEncoderLayer |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 3122,
"end": 3315
} | class ____(FrozenModel):
a: int = 1
model_config = ConfigDict(frozen=False, from_attributes=True)
NotFrozenModel(x=1).x = 2
NotFrozenModel.model_validate(model.__dict__)
| NotFrozenModel |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 5462,
"end": 7487
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(
self,
client_id: str,
client_secret: str,
refresh_token: str,
auth_method: Optional[str] = None,
):
self.auth_method = check.opt_str_param(auth_method, "auth_method")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class AccessToken:
@public
def __init__(self, access_token: str, auth_method: Optional[str] = None):
self.auth_method = check.opt_str_param(auth_method, "auth_method")
self.access_token = check.str_param(access_token, "access_token")
@public
def __init__(
self,
name: str,
credentials: Union["LinkedinAdsSource.OAuth20", "LinkedinAdsSource.AccessToken"],
start_date: str,
account_ids: Optional[list[int]] = None,
):
"""Airbyte Source for Linkedin Ads.
Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-ads
Args:
name (str): The name of the destination.
start_date (str): UTC date in the format 2020-09-17. Any data before this date will not be replicated.
account_ids (Optional[List[int]]): Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
"""
self.credentials = check.inst_param(
credentials, "credentials", (LinkedinAdsSource.OAuth20, LinkedinAdsSource.AccessToken)
)
self.start_date = check.str_param(start_date, "start_date")
self.account_ids = check.opt_nullable_list_param(account_ids, "account_ids", int)
super().__init__("Linkedin Ads", name)
| LinkedinAdsSource |
python | openai__openai-python | src/openai/types/realtime/realtime_server_event.py | {
"start": 4465,
"end": 6578
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
response_id: str
"""The unique ID of the response that produced the audio."""
type: Literal["output_audio_buffer.cleared"]
"""The event type, must be `output_audio_buffer.cleared`."""
RealtimeServerEvent: TypeAlias = Annotated[
Union[
ConversationCreatedEvent,
ConversationItemCreatedEvent,
ConversationItemDeletedEvent,
ConversationItemInputAudioTranscriptionCompletedEvent,
ConversationItemInputAudioTranscriptionDeltaEvent,
ConversationItemInputAudioTranscriptionFailedEvent,
ConversationItemRetrieved,
ConversationItemTruncatedEvent,
RealtimeErrorEvent,
InputAudioBufferClearedEvent,
InputAudioBufferCommittedEvent,
InputAudioBufferSpeechStartedEvent,
InputAudioBufferSpeechStoppedEvent,
RateLimitsUpdatedEvent,
ResponseAudioDeltaEvent,
ResponseAudioDoneEvent,
ResponseAudioTranscriptDeltaEvent,
ResponseAudioTranscriptDoneEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseDoneEvent,
ResponseFunctionCallArgumentsDeltaEvent,
ResponseFunctionCallArgumentsDoneEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
SessionCreatedEvent,
SessionUpdatedEvent,
OutputAudioBufferStarted,
OutputAudioBufferStopped,
OutputAudioBufferCleared,
ConversationItemAdded,
ConversationItemDone,
InputAudioBufferTimeoutTriggered,
ConversationItemInputAudioTranscriptionSegment,
McpListToolsInProgress,
McpListToolsCompleted,
McpListToolsFailed,
ResponseMcpCallArgumentsDelta,
ResponseMcpCallArgumentsDone,
ResponseMcpCallInProgress,
ResponseMcpCallCompleted,
ResponseMcpCallFailed,
],
PropertyInfo(discriminator="type"),
]
| OutputAudioBufferCleared |
python | openai__gym | gym/wrappers/atari_preprocessing.py | {
"start": 215,
"end": 7860
} | class ____(gym.Wrapper):
"""Atari 2600 preprocessing wrapper.
This class follows the guidelines in Machado et al. (2018),
"Revisiting the Arcade Learning Environment: Evaluation Protocols and Open Problems for General Agents".
Specifically, the following preprocess stages applies to the atari environment:
- Noop Reset: Obtains the initial state by taking a random number of no-ops on reset, default max 30 no-ops.
- Frame skipping: The number of frames skipped between steps, 4 by default
- Max-pooling: Pools over the most recent two observations from the frame skips
- Termination signal when a life is lost: When the agent losses a life during the environment, then the environment is terminated.
Turned off by default. Not recommended by Machado et al. (2018).
- Resize to a square image: Resizes the atari environment original observation shape from 210x180 to 84x84 by default
- Grayscale observation: If the observation is colour or greyscale, by default, greyscale.
- Scale observation: If to scale the observation between [0, 1) or [0, 255), by default, not scaled.
"""
def __init__(
self,
env: gym.Env,
noop_max: int = 30,
frame_skip: int = 4,
screen_size: int = 84,
terminal_on_life_loss: bool = False,
grayscale_obs: bool = True,
grayscale_newaxis: bool = False,
scale_obs: bool = False,
):
"""Wrapper for Atari 2600 preprocessing.
Args:
env (Env): The environment to apply the preprocessing
noop_max (int): For No-op reset, the max number no-ops actions are taken at reset, to turn off, set to 0.
frame_skip (int): The number of frames between new observation the agents observations effecting the frequency at which the agent experiences the game.
screen_size (int): resize Atari frame
terminal_on_life_loss (bool): `if True`, then :meth:`step()` returns `terminated=True` whenever a
life is lost.
grayscale_obs (bool): if True, then gray scale observation is returned, otherwise, RGB observation
is returned.
grayscale_newaxis (bool): `if True and grayscale_obs=True`, then a channel axis is added to
grayscale observations to make them 3-dimensional.
scale_obs (bool): if True, then observation normalized in range [0,1) is returned. It also limits memory
optimization benefits of FrameStack Wrapper.
Raises:
DependencyNotInstalled: opencv-python package not installed
ValueError: Disable frame-skipping in the original env
"""
super().__init__(env)
if cv2 is None:
raise gym.error.DependencyNotInstalled(
"opencv-python package not installed, run `pip install gym[other]` to get dependencies for atari"
)
assert frame_skip > 0
assert screen_size > 0
assert noop_max >= 0
if frame_skip > 1:
if (
"NoFrameskip" not in env.spec.id
and getattr(env.unwrapped, "_frameskip", None) != 1
):
raise ValueError(
"Disable frame-skipping in the original env. Otherwise, more than one "
"frame-skip will happen as through this wrapper"
)
self.noop_max = noop_max
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
self.frame_skip = frame_skip
self.screen_size = screen_size
self.terminal_on_life_loss = terminal_on_life_loss
self.grayscale_obs = grayscale_obs
self.grayscale_newaxis = grayscale_newaxis
self.scale_obs = scale_obs
# buffer of most recent two observations for max pooling
assert isinstance(env.observation_space, Box)
if grayscale_obs:
self.obs_buffer = [
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
]
else:
self.obs_buffer = [
np.empty(env.observation_space.shape, dtype=np.uint8),
np.empty(env.observation_space.shape, dtype=np.uint8),
]
self.lives = 0
self.game_over = False
_low, _high, _obs_dtype = (
(0, 255, np.uint8) if not scale_obs else (0, 1, np.float32)
)
_shape = (screen_size, screen_size, 1 if grayscale_obs else 3)
if grayscale_obs and not grayscale_newaxis:
_shape = _shape[:-1] # Remove channel axis
self.observation_space = Box(
low=_low, high=_high, shape=_shape, dtype=_obs_dtype
)
@property
def ale(self):
"""Make ale as a class property to avoid serialization error."""
return self.env.unwrapped.ale
def step(self, action):
"""Applies the preprocessing for an :meth:`env.step`."""
total_reward, terminated, truncated, info = 0.0, False, False, {}
for t in range(self.frame_skip):
_, reward, terminated, truncated, info = self.env.step(action)
total_reward += reward
self.game_over = terminated
if self.terminal_on_life_loss:
new_lives = self.ale.lives()
terminated = terminated or new_lives < self.lives
self.game_over = terminated
self.lives = new_lives
if terminated or truncated:
break
if t == self.frame_skip - 2:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[1])
else:
self.ale.getScreenRGB(self.obs_buffer[1])
elif t == self.frame_skip - 1:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB(self.obs_buffer[0])
return self._get_obs(), total_reward, terminated, truncated, info
def reset(self, **kwargs):
"""Resets the environment using preprocessing."""
# NoopReset
_, reset_info = self.env.reset(**kwargs)
noops = (
self.env.unwrapped.np_random.integers(1, self.noop_max + 1)
if self.noop_max > 0
else 0
)
for _ in range(noops):
_, _, terminated, truncated, step_info = self.env.step(0)
reset_info.update(step_info)
if terminated or truncated:
_, reset_info = self.env.reset(**kwargs)
self.lives = self.ale.lives()
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB(self.obs_buffer[0])
self.obs_buffer[1].fill(0)
return self._get_obs(), reset_info
def _get_obs(self):
if self.frame_skip > 1: # more efficient in-place pooling
np.maximum(self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0])
assert cv2 is not None
obs = cv2.resize(
self.obs_buffer[0],
(self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA,
)
if self.scale_obs:
obs = np.asarray(obs, dtype=np.float32) / 255.0
else:
obs = np.asarray(obs, dtype=np.uint8)
if self.grayscale_obs and self.grayscale_newaxis:
obs = np.expand_dims(obs, axis=-1) # Add a channel axis
return obs
| AtariPreprocessing |
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 5415,
"end": 6054
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
A transformer that adds all elements along axis 1.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
return np.expand_dims(np.sum(X,1),1)
| AddTransformer |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-bedrock/tests/test_bedrock.py | {
"start": 389,
"end": 12263
} | class ____(TestCase):
bedrock_client = boto3.client("bedrock-runtime", region_name="us-east-1")
exp_query = "foo bar baz"
exp_titan_response = {"embedding": exp_embed}
def test_get_text_embedding_titan_v1(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_stream = BytesIO(json.dumps(self.exp_titan_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(
mock_stream, len(json.dumps(self.exp_titan_response))
),
},
expected_params={
"accept": "application/json",
"body": f'{{"inputText": "{self.exp_query}"}}',
"contentType": "application/json",
"modelId": Models.TITAN_EMBEDDING.value,
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING,
client=self.bedrock_client,
)
assert bedrock_embedding.model_name == Models.TITAN_EMBEDDING
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
self.assertEqual(embedding, self.exp_titan_response["embedding"])
def test_get_text_embedding_titan_v1_bad_params(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
bedrock_embedding_dim = BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING,
client=self.bedrock_client,
additional_kwargs={"dimensions": 512},
)
bedrock_embedding_norm = BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING,
client=self.bedrock_client,
additional_kwargs={"normalize": False},
)
bedrock_stubber.activate()
for embedder in [bedrock_embedding_dim, bedrock_embedding_norm]:
with pytest.raises(ValueError):
embedder.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
def test_get_text_embedding_titan_v2(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
exp_body_request_param = json.dumps(
{"inputText": self.exp_query, "dimensions": 512, "normalize": True}
)
mock_stream = BytesIO(json.dumps(self.exp_titan_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(
mock_stream, len(json.dumps(self.exp_titan_response))
),
},
expected_params={
"accept": "application/json",
"body": exp_body_request_param,
"contentType": "application/json",
"modelId": Models.TITAN_EMBEDDING_V2_0.value,
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING_V2_0,
client=self.bedrock_client,
additional_kwargs={"dimensions": 512, "normalize": True},
)
assert bedrock_embedding.model_name == Models.TITAN_EMBEDDING_V2_0
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
self.assertEqual(embedding, self.exp_titan_response["embedding"])
def test_get_text_embedding_cohere(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"embeddings": [exp_embed]}
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
self.assertEqual(embedding, mock_response["embeddings"][0])
def test_get_text_embedding_batch_cohere(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"embeddings": [exp_embed, exp_embed]}
mock_request = [self.exp_query, self.exp_query]
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding_batch(texts=mock_request)
bedrock_stubber.deactivate()
self.assertEqual(len(embedding), 2)
for i in range(2):
self.assertEqual(embedding[i], mock_response["embeddings"][i])
def test_list_supported_models(self):
exp_dict = {
"amazon": [
"amazon.titan-embed-text-v1",
"amazon.titan-embed-text-v2:0",
"amazon.titan-embed-g1-text-02",
],
"cohere": [
"cohere.embed-english-v3",
"cohere.embed-multilingual-v3",
"cohere.embed-v4:0",
],
}
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
assert bedrock_embedding.list_supported_models() == exp_dict
def test_optional_args_in_json_schema(self) -> None:
json_schema = BedrockEmbedding.model_json_schema()
assert "botocore_session" in json_schema["properties"]
assert json_schema["properties"]["botocore_session"].get("default") is None
assert "botocore_session" not in json_schema.get("required", [])
def test_get_text_embedding_cohere_v4_nested_format(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"embeddings": {"float": [exp_embed]}}
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
self.assertEqual(embedding, exp_embed)
def test_get_text_embedding_cohere_v4_direct_float_format(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"float": [exp_embed]}
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
self.assertEqual(embedding, exp_embed)
def test_get_text_embedding_batch_cohere_v4_format(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"embeddings": {"float": [exp_embed, exp_embed]}}
mock_request = [self.exp_query, self.exp_query]
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
embedding = bedrock_embedding.get_text_embedding_batch(texts=mock_request)
bedrock_stubber.deactivate()
self.assertEqual(len(embedding), 2)
for i in range(2):
self.assertEqual(embedding[i], exp_embed)
def test_get_text_embedding_cohere_unexpected_format(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
mock_response = {"unexpected_key": "unexpected_value"}
mock_stream = BytesIO(json.dumps(mock_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(mock_stream, len(json.dumps(mock_response))),
},
)
bedrock_embedding = BedrockEmbedding(
model_name=Models.COHERE_EMBED_ENGLISH_V3,
client=self.bedrock_client,
)
bedrock_stubber.activate()
with pytest.raises(
ValueError, match="Unexpected Cohere embedding response format"
):
bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
def test_application_inference_profile_in_invoke_model_request(self) -> None:
bedrock_stubber = Stubber(self.bedrock_client)
model_name = Models.TITAN_EMBEDDING_V2_0
application_inference_profile_arn = "arn:aws:bedrock:us-east-1:012345678901:application-inference-profile/testProfileId"
mock_stream = BytesIO(json.dumps(self.exp_titan_response).encode())
bedrock_stubber.add_response(
"invoke_model",
{
"contentType": "application/json",
"body": StreamingBody(
mock_stream, len(json.dumps(self.exp_titan_response))
),
},
expected_params={
"accept": "application/json",
"body": BOTOCORE_ANY,
"contentType": "application/json",
"modelId": application_inference_profile_arn,
},
)
bedrock_embedding = BedrockEmbedding(
model_name=model_name,
application_inference_profile_arn=application_inference_profile_arn,
client=self.bedrock_client,
)
assert bedrock_embedding.model_name == model_name
assert (
bedrock_embedding.application_inference_profile_arn
== application_inference_profile_arn
)
bedrock_stubber.activate()
bedrock_embedding.get_text_embedding(text=self.exp_query)
bedrock_stubber.deactivate()
bedrock_stubber.assert_no_pending_responses()
| TestBedrockEmbedding |
python | mwaskom__seaborn | tests/_marks/test_line.py | {
"start": 8282,
"end": 9105
} | class ____:
def test_xy_data(self):
x = [1, 5, 3, np.nan, 2]
y = [1, 4, 2, 5, 3]
g = [1, 2, 1, 1, 2]
p = Plot(x=x, y=y, group=g).add(Lines()).plot()
lines, = p._figure.axes[0].collections
verts = lines.get_paths()[0].vertices.T
assert_array_equal(verts[0], [1, 3])
assert_array_equal(verts[1], [1, 2])
verts = lines.get_paths()[1].vertices.T
assert_array_equal(verts[0], [2, 5])
assert_array_equal(verts[1], [3, 4])
def test_single_orient_value(self):
x = [1, 1, 1]
y = [1, 2, 3]
p = Plot(x, y).add(Lines()).plot()
lines, = p._figure.axes[0].collections
verts = lines.get_paths()[0].vertices.T
assert_array_equal(verts[0], x)
assert_array_equal(verts[1], y)
| TestLines |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-dashscope/llama_index/indices/managed/dashscope/base.py | {
"start": 1329,
"end": 9566
} | class ____(BaseManagedIndex):
"""DashScope Cloud Platform Index."""
def __init__(
self,
name: str,
nodes: Optional[List[BaseNode]] = None,
transformations: Optional[List[TransformComponent]] = None,
timeout: int = 60,
workspace_id: Optional[str] = None,
api_key: Optional[str] = None,
base_url: Optional[str] = DASHSCOPE_DEFAULT_BASE_URL,
show_progress: bool = False,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize the Platform Index."""
self.name = name
self.transformations = transformations or []
if nodes is not None:
raise ValueError(
"DashScopeCloudIndex does not support nodes on initialization"
)
self.workspace_id = workspace_id or os.environ.get("DASHSCOPE_WORKSPACE_ID")
self._api_key = api_key or os.environ.get("DASHSCOPE_API_KEY")
self._base_url = os.environ.get("DASHSCOPE_BASE_URL", None) or base_url
self._headers = {
"Content-Type": "application/json",
"Accept-Encoding": "utf-8",
"X-DashScope-WorkSpace": self.workspace_id,
"Authorization": "Bearer " + self._api_key,
"X-DashScope-OpenAPISource": "CloudSDK",
}
self._timeout = timeout
self._show_progress = show_progress
self._service_context = None
self._callback_manager = callback_manager or Settings.callback_manager
@classmethod
def from_documents( # type: ignore
cls: Type["DashScopeCloudIndex"],
documents: List[Document],
name: str,
transformations: Optional[List[TransformComponent]] = None,
workspace_id: Optional[str] = None,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
timeout: int = 60,
verbose: bool = True,
**kwargs: Any,
) -> "DashScopeCloudIndex":
"""Build a DashScope index from a sequence of documents."""
pipeline_create = get_pipeline_create(
name, transformations or default_transformations(), documents
)
workspace_id = workspace_id or os.environ.get("DASHSCOPE_WORKSPACE_ID")
api_key = api_key or os.environ.get("DASHSCOPE_API_KEY")
base_url = (
base_url
or os.environ.get("DASHSCOPE_BASE_URL", None)
or DASHSCOPE_DEFAULT_BASE_URL
)
headers = {
"Content-Type": "application/json",
"Accept-Encoding": "utf-8",
"X-DashScope-WorkSpace": workspace_id,
"Authorization": "Bearer " + api_key,
"X-DashScope-OpenAPISource": "CloudSDK",
}
response = requests.put(
base_url + UPSERT_PIPELINE_ENDPOINT,
data=json.dumps(pipeline_create),
headers=headers,
)
response_text = response.json()
pipeline_id = response_text.get("id", None)
if response_text.get("code", "") != Status.SUCCESS.value or pipeline_id is None:
raise ValueError(
f"Failed to create index: {response_text.get('message', '')}\n{response_text}"
)
if verbose:
print(f"Starting creating index {name}, pipeline_id: {pipeline_id}")
response = requests.post(
base_url + START_PIPELINE_ENDPOINT.format(pipeline_id=pipeline_id),
headers=headers,
)
response_text = response.json()
ingestion_id = response_text.get("ingestionId", None)
if (
response_text.get("code", "") != Status.SUCCESS.value
or ingestion_id is None
):
raise ValueError(
f"Failed to start ingestion: {response_text.get('message', '')}\n{response_text}"
)
if verbose:
print(f"Starting ingestion for index {name}, ingestion_id: {ingestion_id}")
ingestion_status, failed_docs = run_ingestion(
base_url
+ CHECK_INGESTION_ENDPOINT.format(
pipeline_id=pipeline_id, ingestion_id=ingestion_id
),
headers,
verbose,
)
if verbose:
print(f"ingestion_status {ingestion_status}")
print(f"failed_docs: {failed_docs}")
if ingestion_status == "FAILED":
print("Index {name} created failed!")
return None
if verbose:
print(f"Index {name} created successfully!")
return cls(
name,
transformations=transformations,
workspace_id=workspace_id,
api_key=api_key,
base_url=base_url,
timeout=timeout,
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
"""Return a Retriever for this managed index."""
from llama_index.indices.managed.dashscope.retriever import (
DashScopeCloudRetriever,
)
return DashScopeCloudRetriever(
self.name,
**kwargs,
)
def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine:
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
kwargs["retriever"] = self.as_retriever(**kwargs)
return RetrieverQueryEngine.from_args(**kwargs)
def _insert(
self,
documents: List[Document],
transformations: Optional[List[TransformComponent]] = None,
verbose: bool = True,
**insert_kwargs: Any,
) -> None:
"""Insert a set of documents (each a node)."""
pipeline_id = get_pipeline_id(
self._base_url + PIPELINE_SIMPLE_ENDPOINT,
self._headers,
{"pipeline_name": self.name},
)
doc_insert = get_doc_insert(
transformations or default_transformations(),
documents,
)
response = requests.put(
self._base_url + INSERT_DOC_ENDPOINT.format(pipeline_id=pipeline_id),
data=json.dumps(doc_insert),
headers=self._headers,
)
response_text = response.json()
ingestion_id = response_text.get("ingestionId", None)
if (
response_text.get("code", "") != Status.SUCCESS.value
or ingestion_id is None
):
raise ValueError(
f"Failed to insert documents: {response_text.get('message', '')}\n{response_text}"
)
ingestion_status, failed_docs = run_ingestion(
self._base_url
+ CHECK_INGESTION_ENDPOINT.format(
pipeline_id=pipeline_id, ingestion_id=ingestion_id
),
self._headers,
verbose,
)
if verbose:
print(f"ingestion_status {ingestion_status}")
print(f"failed_docs: {failed_docs}")
def delete_ref_doc(
self,
ref_doc_ids: Union[str, List[str]],
verbose: bool = True,
**delete_kwargs: Any,
) -> None:
"""Delete documents in index."""
if isinstance(ref_doc_ids, str):
ref_doc_ids = [ref_doc_ids]
pipeline_id = get_pipeline_id(
self._base_url + PIPELINE_SIMPLE_ENDPOINT,
self._headers,
{"pipeline_name": self.name},
)
doc_delete = get_doc_delete(ref_doc_ids)
response = requests.post(
self._base_url + DELETE_DOC_ENDPOINT.format(pipeline_id=pipeline_id),
json=doc_delete,
headers=self._headers,
)
response_text = response.json()
if response_text.get("code", "") != Status.SUCCESS.value:
raise ValueError(
f"Failed to delete documents: {response_text.get('message', '')}\n{response_text}"
)
if verbose:
print(f"Delete documents {ref_doc_ids} successfully!")
def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None:
"""Update a document and it's corresponding nodes."""
raise NotImplementedError("update_ref_doc not implemented.")
| DashScopeCloudIndex |
python | numba__numba | numba/tests/test_tuples.py | {
"start": 19494,
"end": 23912
} | class ____(TestCase):
def test_build_unpack(self):
def check(p):
pyfunc = lambda a: (1, *a)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
# Homogeneous
check((4, 5))
# Heterogeneous
check((4, 5.5))
def test_build_unpack_assign_like(self):
# see #6534
def check(p):
pyfunc = lambda a: (*a,)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
# Homogeneous
check((4, 5))
# Heterogeneous
check((4, 5.5))
def test_build_unpack_fail_on_list_assign_like(self):
# see #6534
def check(p):
pyfunc = lambda a: (*a,)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
with self.assertRaises(errors.TypingError) as raises:
check([4, 5])
# Python 3.9 has a peephole rewrite due to large changes in tuple
# unpacking. It results in a tuple + list situation from the above
# so the error message reflects that. Catching this specific and
# seemingly rare sequence in the peephole rewrite is prohibitively
# hard. Should it be reported numerous times, revisit then.
msg1 = "No implementation of function"
self.assertIn(msg1, str(raises.exception))
msg2 = "tuple(reflected list(" # ignore the rest of reflected list
# part, it's repr is quite volatile.
self.assertIn(msg2, str(raises.exception))
def test_build_unpack_more(self):
def check(p):
pyfunc = lambda a: (1, *a, (1, 2), *a)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
# Homogeneous
check((4, 5))
# Heterogeneous
check((4, 5.5))
def test_build_unpack_call(self):
def check(p):
@jit
def inner(*args):
return args
pyfunc = lambda a: inner(1, *a)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
# Homogeneous
check((4, 5))
# Heterogeneous
check((4, 5.5))
def test_build_unpack_call_more(self):
def check(p):
@jit
def inner(*args):
return args
pyfunc = lambda a: inner(1, *a, *(1, 2), *a)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
# Homogeneous
check((4, 5))
# Heterogeneous
check((4, 5.5))
def test_tuple_constructor(self):
def check(pyfunc, arg):
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(arg), pyfunc(arg))
# empty
check(lambda _: tuple(), ())
# Homogeneous
check(lambda a: tuple(a), (4, 5))
# Heterogeneous
check(lambda a: tuple(a), (4, 5.5))
def test_unpack_with_predicate_fails(self):
# this fails as the list_to_tuple/list_extend peephole bytecode
# rewriting needed for Python 3.9+ cannot yet traverse the CFG.
@njit
def foo():
a = (1,)
b = (3,2, 4)
return (*(b if a[0] else (5, 6)),)
with self.assertRaises(errors.UnsupportedBytecodeError) as raises:
foo()
msg = "op_LIST_EXTEND at the start of a block"
self.assertIn(msg, str(raises.exception))
def test_build_unpack_with_calls_in_unpack(self):
def check(p):
def pyfunc(a):
z = [1, 2]
return (*a, z.append(3), z.extend(a), np.ones(3)), z
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
check((4, 5))
def test_build_unpack_complicated(self):
def check(p):
def pyfunc(a):
z = [1, 2]
return (*a, *(*a, a), *(a, (*(a, (1, 2), *(3,), *a),
(a, 1, (2, 3), *a, 1), (1,))),
*(z.append(4), z.extend(a))), z
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(p), pyfunc(p))
check((10, 20))
if __name__ == '__main__':
unittest.main()
| TestTupleBuild |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1056854,
"end": 1057203
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (
OrgRestoreMemberMembershipOrganizationAuditEntryData,
OrgRestoreMemberMembershipRepositoryAuditEntryData,
OrgRestoreMemberMembershipTeamAuditEntryData,
)
| OrgRestoreMemberAuditEntryMembership |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-deepset/destination_deepset/api.py | {
"start": 391,
"end": 483
} | class ____(RuntimeError):
"""Raised when any error occurs while using the API."""
| APIError |
python | scrapy__scrapy | tests/mockserver/simple_https.py | {
"start": 245,
"end": 469
} | class ____(resource.Resource):
def __init__(self):
resource.Resource.__init__(self)
self.putChild(b"file", Data(b"0123456789", "text/plain"))
def getChild(self, name, request):
return self
| Root |
python | sympy__sympy | sympy/polys/matrices/exceptions.py | {
"start": 818,
"end": 902
} | class ____(DMError):
"""matrix does not have expected rank"""
pass
| DMRankError |
python | prabhupant__python-ds | data_structures/heap/max_heap.py | {
"start": 119,
"end": 3901
} | class ____:
def __init__(self, maxsize):
self.maxsize = maxsize
self.size = 0 # current number of elements in the heap
self.heap = [0] * self.maxsize
self.front = 0
def parent(self, pos):
return (pos) // 2
def left_child(self, pos):
return 2*pos + 1
def right_child(self, pos):
return 2*pos + 2
def mid_index(self):
return self.size // 2
def last_index(self):
return self.size - 1
def is_leaf(self, pos):
"""
Every node that is after the middle index of the heap
is a leaf node because their children cannot exist as the
index of children are twice their index as those indexes
do not exist in the heap
"""
if self.mid_index() <= pos <= self.last_index():
return True
return False
def is_empty(self):
if self.size == 0:
return True
return False
def insert(self, value):
if self.is_empty(): # if the heap is empty
self.heap[self.front] = value
self.size += 1
return
if self.size >= self.maxsize: # if max size has been reached
return
self.size += 1
self.heap[self.last_index()] = value
curr = self.last_index()
# While inserting the element in the heap we have to
# make sure that the inserted element is always smaller
# than its parent. So basically here we are adjusting the
# position of the parent
while self.heap[curr] > self.heap[self.parent(curr)]:
self.swap(curr, self.parent(curr))
curr = self.parent(curr)
def max_heapify(self, pos):
"""
This function will run whenever a node is non-leaf
node and smaller than its childen
"""
if not self.is_leaf(pos):
left = self.heap[self.left_child(pos)]
right = self.heap[self.right_child(pos)]
curr = self.heap[pos]
if curr < left or curr < right:
# This check is only to prevent out-of-index error
if left > right:
self.swap(pos, self.left_child(pos))
self.max_heapify(self.left_child(pos))
else:
self.swap(pos, self.right_child(pos))
self.max_heapify(self.right_child(pos))
def swap(self, x, y):
self.heap[x], self.heap[y] = self.heap[y], self.heap[x]
def pop_max(self):
max_element = self.heap[self.front] # max element is always at the front
self.heap[self.front] = self.heap[self.last_index()] # placing last element at the front
self.heap[self.last_index()] = 0
self.size -= 1 # decrease size as one element has been popped
self.max_heapify(self.front) # heapify the heap again
return max_element
def print(self):
"""
Priting in inorder
"""
for i in range(0, self.mid_index() + 1):
parent = self.heap[i]
left = self.heap[self.left_child(i)]
right = self.heap[self.right_child(i)]
print(f"Parent: {self.heap[i]}")
if left:
print(f"Left child: {left}")
if right:
print(f"Right child: {right}")
if __name__ == '__main__':
max_heap = MaxHeap(15)
max_heap.insert(5)
max_heap.insert(3)
max_heap.insert(17)
max_heap.insert(10)
max_heap.insert(84)
max_heap.insert(19)
max_heap.insert(6)
max_heap.insert(22)
max_heap.insert(9)
max_heap.print()
print('Max element is - ', max_heap.pop_max())
max_heap.print()
| MaxHeap |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1116445,
"end": 1117175
} | class ____(ValueChannelMixin, core.PositionValueDef):
"""
Y2Value schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y2"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| Y2Value |
python | pytorch__pytorch | tools/experimental/torchfuzz/tensor_fuzzer.py | {
"start": 131,
"end": 378
} | class ____:
"""Global configuration for tensor fuzzing behavior."""
use_real_values: bool = True # If False, use zeros; if True, use random values
avoid_complex: bool = False # If True, exclude complex dtypes from fuzzing
| FuzzerConfig |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/input_validation.py | {
"start": 169,
"end": 971
} | class ____(App):
CSS = """
Input.-valid {
border: tall $success 60%;
}
Input.-valid:focus {
border: tall $success;
}
Input {
margin: 1 2;
}
"""
def compose(self) -> ComposeResult:
yield Input(
placeholder="Enter a number between 1 and 5",
validators=VALIDATORS,
)
yield Input(
placeholder="Enter a number between 1 and 5",
validators=VALIDATORS,
)
yield Input(
placeholder="Enter a number between 1 and 5",
validators=VALIDATORS,
)
yield Input(
placeholder="Enter a number between 1 and 5",
validators=VALIDATORS,
)
app = InputApp()
if __name__ == '__main__':
app.run()
| InputApp |
python | doocs__leetcode | solution/0400-0499/0425.Word Squares/Solution.py | {
"start": 584,
"end": 1177
} | class ____:
def wordSquares(self, words: List[str]) -> List[List[str]]:
def dfs(t):
if len(t) == len(words[0]):
ans.append(t[:])
return
idx = len(t)
pref = [v[idx] for v in t]
indexes = trie.search(''.join(pref))
for i in indexes:
t.append(words[i])
dfs(t)
t.pop()
trie = Trie()
ans = []
for i, w in enumerate(words):
trie.insert(w, i)
for w in words:
dfs([w])
return ans
| Solution |
python | Pylons__pyramid | tests/test_view.py | {
"start": 41278,
"end": 41608
} | class ____:
def __init__(self):
self.config = DummyConfig()
def call_venusian(venusian, context=None):
if context is None:
context = DummyVenusianContext()
for wrapped, callback, category, depth in venusian.attachments:
callback(context, None, None)
return context.config
| DummyVenusianContext |
python | instagram__MonkeyType | tests/test_typing.py | {
"start": 5072,
"end": 17660
} | class ____:
@pytest.mark.parametrize(
'types, expected_type',
[
(
(
make_typed_dict(required_fields={'a': int, 'b': int}),
make_typed_dict(required_fields={'a': int, 'b': int}),
),
make_typed_dict(required_fields={'a': int, 'b': int}),
),
(
(
make_typed_dict(required_fields={'a': int, 'b': int}),
make_typed_dict(required_fields={'a': int}),
),
make_typed_dict(required_fields={'a': int}, optional_fields={'b': int}),
),
(
(
make_typed_dict(required_fields={'a': int, 'b': int}),
make_typed_dict(required_fields={'a': int, 'c': int}),
),
make_typed_dict(required_fields={'a': int}, optional_fields={'b': int, 'c': int}),
),
(
(
make_typed_dict(required_fields={'a': str}),
make_typed_dict(required_fields={'a': int}),
),
make_typed_dict(required_fields={'a': Union[str, int]}, optional_fields={}),
),
(
(
make_typed_dict(required_fields={'a': str}),
make_typed_dict(required_fields={'a': int}),
make_typed_dict(required_fields={'b': int}),
),
make_typed_dict(required_fields={}, optional_fields={'a': Union[str, int], 'b': int}),
),
# Cases where the input TypedDict has optional fields.
(
(
make_typed_dict(optional_fields={'a': int, 'b': int}),
make_typed_dict(optional_fields={'a': int, 'b': int}),
),
make_typed_dict(optional_fields={'a': int, 'b': int}),
),
(
(
make_typed_dict(optional_fields={'a': int, 'b': int}),
make_typed_dict(optional_fields={'a': int, 'c': int}),
),
make_typed_dict(optional_fields={'a': int, 'b': int, 'c': int}),
),
(
(
make_typed_dict(optional_fields={'a': str}),
make_typed_dict(optional_fields={'a': int}),
),
make_typed_dict(optional_fields={'a': Union[str, int]}),
),
(
(
make_typed_dict(optional_fields={'a': str}),
make_typed_dict(optional_fields={'b': int}),
),
make_typed_dict(optional_fields={'a': str, 'b': int}),
),
(
(
make_typed_dict(required_fields={'a': str}),
make_typed_dict(optional_fields={'a': str}),
),
make_typed_dict(optional_fields={'a': str}),
),
# The shrunk TypedDict is too large, so fall back to Dict.
(
(
make_typed_dict(required_fields={'a1': int}),
make_typed_dict(required_fields={'a2': int}),
make_typed_dict(required_fields={'a3': int}),
make_typed_dict(required_fields={'a4': int}),
make_typed_dict(required_fields={'a5': int}),
make_typed_dict(required_fields={'a6': int}),
make_typed_dict(required_fields={'a7': int}),
make_typed_dict(required_fields={'a8': int}),
make_typed_dict(required_fields={'a9': int}),
make_typed_dict(required_fields={'a10': int}),
make_typed_dict(required_fields={'a11': int}),
),
Dict[str, int],
),
(
(
make_typed_dict(required_fields={'a1': int, 'a2': int, 'a3': int, 'a4': int, 'a5': int}),
make_typed_dict(required_fields={'a6': int, 'a7': int, 'a8': int, 'a9': int, 'a10': int}),
make_typed_dict(required_fields={'a11': int}),
),
Dict[str, int],
),
# Nested TypedDict.
(
(
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': int,
'b': str
}),
}),
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': int,
'b': str
}),
}),
),
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': int,
'b': str
}),
}),
),
# Nested TypedDict with differing types.
(
(
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': int,
'b': str
}),
}),
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': str,
}),
}),
),
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': Union[int, str],
}, optional_fields={
'b': str,
}),
}),
),
],
)
def test_shrink_non_uniform_typed_dict_types(self, types, expected_type):
actual = shrink_types(types, max_typed_dict_size=10)
assert actual == expected_type
@pytest.mark.parametrize(
'types, expected_type',
[
# Sanity-check that it works for primitive types.
(
(int, str), Union[int, str],
),
# Non-TypedDict type with just one trace.
(
(
List[make_typed_dict(required_fields={'a': int})],
),
List[make_typed_dict(required_fields={'a': int})],
),
# Same non-TypedDict types.
(
(
List[make_typed_dict(required_fields={'a': int})],
List[make_typed_dict(required_fields={'a': int})],
),
List[make_typed_dict(required_fields={'a': int})],
),
# Non-TypedDict types but not all the same - convert anonymous TypedDicts to Dicts.
(
(
List[make_typed_dict(required_fields={'a': int})],
List[Dict[str, int]],
),
List[Dict[str, int]],
),
(
(
List[make_typed_dict(required_fields={'a': int})],
List[make_typed_dict(required_fields={'b': int})],
),
List[make_typed_dict(optional_fields={'a': int, 'b': int})],
),
(
(
make_typed_dict(required_fields={"foo": List[make_typed_dict(required_fields={'a': int})]}),
make_typed_dict(required_fields={"foo": List[make_typed_dict(required_fields={'a': int})]}),
),
make_typed_dict(required_fields={"foo": List[make_typed_dict(required_fields={'a': int})]}),
),
(
(
make_typed_dict(required_fields={"foo": List[make_typed_dict(required_fields={'a': int})]}),
make_typed_dict(required_fields={"foo": List[make_typed_dict(required_fields={'b': int})]}),
),
make_typed_dict(required_fields={"foo": List[make_typed_dict(optional_fields={'a': int, 'b': int})]}),
),
(
(
typing_Tuple[make_typed_dict(required_fields={'a': int})],
typing_Tuple[make_typed_dict(required_fields={'a': int})],
),
typing_Tuple[make_typed_dict(required_fields={'a': int})],
),
# We don't currently shrink the inner types for Tuples.
(
(
typing_Tuple[make_typed_dict(required_fields={'a': int})],
typing_Tuple[make_typed_dict(required_fields={'b': int})],
),
typing_Tuple[Dict[str, int]],
),
# Fall back to Dict when the resulting TypedDict would be too large.
# Keep any nested anonymous TypedDicts, though.
(
(
make_typed_dict(required_fields={'a1': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a2': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a3': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a4': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a5': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a6': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a7': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a8': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a9': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a10': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a11': make_typed_dict(required_fields={'b': str})}),
make_typed_dict(required_fields={'a11': make_typed_dict(required_fields={'c': int})}),
),
Dict[str, make_typed_dict(optional_fields={'b': str, 'c': int})],
),
],
)
def test_shrink_types_non_typed_dict(self, types, expected_type):
actual = shrink_types(types, max_typed_dict_size=10)
assert types_equal(actual, expected_type)
@pytest.mark.parametrize(
'types, expected_type',
[
([], Any),
((int,), int),
((int, int, int), int),
((int, NoneType), Optional[int]),
((int, str), Union[int, str]),
((int, str, NoneType), Optional[Union[int, str]]),
],
)
def test_shrink_types(self, types, expected_type):
assert shrink_types(types, max_typed_dict_size=0) == expected_type
@pytest.mark.parametrize(
'types, expected_type',
[
# If all are anonymous TypedDicts, we get the shrunk TypedDict.
(
(
make_typed_dict(required_fields={'a': int, 'b': int}),
make_typed_dict(required_fields={'a': int, 'b': int}),
),
make_typed_dict(required_fields={'a': int, 'b': int}),
),
# If not all are anonymous TypedDicts, we get the Dict equivalents.
(
(
make_typed_dict(required_fields={'a': int, 'b': int}),
Dict[int, int]
),
Union[Dict[str, int], Dict[int, int]],
),
# If not all are anonymous TypedDicts, we convert any nested TypedDicts to Dicts as well.
(
(
make_typed_dict(required_fields={'a': make_typed_dict(required_fields={'b': int})}),
Dict[str, int]
),
Union[Dict[str, Dict[str, int]], Dict[str, int]],
),
],
)
def test_shrink_types_mixed_dicts(self, types, expected_type):
assert shrink_types(types, max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE) == expected_type
| TestShrinkType |
python | django__django | tests/forms_tests/field_tests/test_decimalfield.py | {
"start": 272,
"end": 10070
} | class ____(FormFieldAssertionsMixin, SimpleTestCase):
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(
f, '<input id="id_f" step="0.01" type="number" name="f" required>'
)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(f.clean("1"), decimal.Decimal("1"))
self.assertIsInstance(f.clean("1"), decimal.Decimal)
self.assertEqual(f.clean("23"), decimal.Decimal("23"))
self.assertEqual(f.clean("3.14"), decimal.Decimal("3.14"))
self.assertEqual(f.clean(3.14), decimal.Decimal("3.14"))
self.assertEqual(f.clean(decimal.Decimal("3.14")), decimal.Decimal("3.14"))
self.assertEqual(f.clean("1.0 "), decimal.Decimal("1.0"))
self.assertEqual(f.clean(" 1.0"), decimal.Decimal("1.0"))
self.assertEqual(f.clean(" 1.0 "), decimal.Decimal("1.0"))
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'"
):
f.clean("123.45")
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'"
):
f.clean("1.234")
msg = "'Ensure that there are no more than 2 digits before the decimal point.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("123.4")
self.assertEqual(f.clean("-12.34"), decimal.Decimal("-12.34"))
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'"
):
f.clean("-123.45")
self.assertEqual(f.clean("-.12"), decimal.Decimal("-0.12"))
self.assertEqual(f.clean("-00.12"), decimal.Decimal("-0.12"))
self.assertEqual(f.clean("-000.12"), decimal.Decimal("-0.12"))
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'"
):
f.clean("-000.123")
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'"
):
f.clean("-000.12345")
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_enter_a_number_error(self):
f = DecimalField(max_value=1, max_digits=4, decimal_places=2)
values = (
"-NaN",
"NaN",
"+NaN",
"-sNaN",
"sNaN",
"+sNaN",
"-Inf",
"Inf",
"+Inf",
"-Infinity",
"Infinity",
"+Infinity",
"a",
"łąść",
"1.0a",
"--0.12",
)
for value in values:
with (
self.subTest(value=value),
self.assertRaisesMessage(ValidationError, "'Enter a number.'"),
):
f.clean(value)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean("1"), decimal.Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_decimalfield_3(self):
f = DecimalField(
max_digits=4,
decimal_places=2,
max_value=decimal.Decimal("1.5"),
min_value=decimal.Decimal("0.5"),
)
self.assertWidgetRendersTo(
f,
'<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" '
"required>",
)
with self.assertRaisesMessage(
ValidationError, "'Ensure this value is less than or equal to 1.5.'"
):
f.clean("1.6")
with self.assertRaisesMessage(
ValidationError, "'Ensure this value is greater than or equal to 0.5.'"
):
f.clean("0.4")
self.assertEqual(f.clean("1.5"), decimal.Decimal("1.5"))
self.assertEqual(f.clean("0.5"), decimal.Decimal("0.5"))
self.assertEqual(f.clean(".5"), decimal.Decimal("0.5"))
self.assertEqual(f.clean("00.50"), decimal.Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, decimal.Decimal("1.5"))
self.assertEqual(f.min_value, decimal.Decimal("0.5"))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'"
):
f.clean("0.00000001")
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean("0000000.10"), decimal.Decimal("0.1"))
# But a leading 0 before the . doesn't count toward max_digits
self.assertEqual(f.clean("0000000.100"), decimal.Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean("000000.02"), decimal.Decimal("0.02"))
with self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 3 digits in total.'"
):
f.clean("000000.0002")
self.assertEqual(f.clean(".002"), decimal.Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean(".01"), decimal.Decimal(".01"))
msg = "'Ensure that there are no more than 0 digits before the decimal point.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("1.1")
def test_decimalfield_step_size_min_value(self):
f = DecimalField(
step_size=decimal.Decimal("0.3"),
min_value=decimal.Decimal("-0.4"),
)
self.assertWidgetRendersTo(
f,
'<input name="f" min="-0.4" step="0.3" type="number" id="id_f" required>',
)
msg = (
"Ensure this value is a multiple of step size 0.3, starting from -0.4, "
"e.g. -0.4, -0.1, 0.2, and so on."
)
with self.assertRaisesMessage(ValidationError, msg):
f.clean("1")
self.assertEqual(f.clean("0.2"), decimal.Decimal("0.2"))
self.assertEqual(f.clean(2), decimal.Decimal(2))
self.assertEqual(f.step_size, decimal.Decimal("0.3"))
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=4, decimal_places=2)
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean("1E+2")
self.assertEqual(f.clean("1E+1"), decimal.Decimal("10"))
self.assertEqual(f.clean("1E-1"), decimal.Decimal("0.1"))
self.assertEqual(f.clean("0.546e+2"), decimal.Decimal("54.6"))
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {"step": "0.01"})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {"step": "1"})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {"step": "1e-19"})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {"step": "any"})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={"step": "0.01"}))
self.assertWidgetRendersTo(
f, '<input step="0.01" name="f" type="number" id="id_f" required>'
)
def test_decimalfield_localized(self):
"""
A localized DecimalField's widget renders to a text input without
number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" required>')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = decimal.Decimal("0.1")
self.assertFalse(f.has_changed(d, "0.10"))
self.assertTrue(f.has_changed(d, "0.101"))
with translation.override("fr"):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
@override_settings(DECIMAL_SEPARATOR=",")
def test_decimalfield_support_decimal_separator(self):
with translation.override(None):
f = DecimalField(localize=True)
self.assertEqual(f.clean("1001,10"), decimal.Decimal("1001.10"))
self.assertEqual(f.clean("1001.10"), decimal.Decimal("1001.10"))
@override_settings(
DECIMAL_SEPARATOR=",",
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR=".",
)
def test_decimalfield_support_thousands_separator(self):
with translation.override(None):
f = DecimalField(localize=True)
self.assertEqual(f.clean("1.001,10"), decimal.Decimal("1001.10"))
msg = "'Enter a number.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("1,001.1")
| DecimalFieldTest |
python | pytorch__pytorch | tools/linter/adapters/ruff_linter.py | {
"start": 565,
"end": 13090
} | class ____:
"""A lint message defined by https://docs.rs/lintrunner/latest/lintrunner/lint_message/struct.LintMessage.html."""
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def asdict(self) -> dict[str, Any]:
return dataclasses.asdict(self)
def display(self) -> None:
"""Print to stdout for lintrunner to consume."""
print(json.dumps(self.asdict()), flush=True)
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: list[str],
*,
timeout: int | None,
stdin: BinaryIO | None,
input: bytes | None,
check: bool,
cwd: os.PathLike[Any] | None,
) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
if input is not None:
return subprocess.run(
args,
capture_output=True,
shell=False,
input=input,
timeout=timeout,
check=check,
cwd=cwd,
)
return subprocess.run(
args,
stdin=stdin,
capture_output=True,
shell=False,
timeout=timeout,
check=check,
cwd=cwd,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: list[str],
*,
retries: int = 0,
timeout: int | None = None,
stdin: BinaryIO | None = None,
input: bytes | None = None,
check: bool = False,
cwd: os.PathLike[Any] | None = None,
) -> subprocess.CompletedProcess[bytes]:
remaining_retries = retries
while True:
try:
return _run_command(
args, timeout=timeout, stdin=stdin, input=input, check=check, cwd=cwd
)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning( # noqa: G200
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def add_default_options(parser: argparse.ArgumentParser) -> None:
"""Add default options to a parser.
This should be called the last in the chain of add_argument calls.
"""
parser.add_argument(
"--retries",
type=int,
default=3,
help="number of times to retry if the linter times out.",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
def explain_rule(code: str) -> str:
proc = run_command(
["ruff", "rule", "--output-format=json", code],
check=True,
)
rule = json.loads(str(proc.stdout, "utf-8").strip())
return f"\n{rule['linter']}: {rule['summary']}"
def get_issue_severity(code: str) -> LintSeverity:
# "B901": `return x` inside a generator
# "B902": Invalid first argument to a method
# "B903": __slots__ efficiency
# "B950": Line too long
# "C4": Flake8 Comprehensions
# "C9": Cyclomatic complexity
# "E2": PEP8 horizontal whitespace "errors"
# "E3": PEP8 blank line "errors"
# "E5": PEP8 line length "errors"
# "T400": type checking Notes
# "T49": internal type checker errors or unmatched messages
if any(
code.startswith(x)
for x in (
"B9",
"C4",
"C9",
"E2",
"E3",
"E5",
"T400",
"T49",
"PLC",
"PLR",
)
):
return LintSeverity.ADVICE
# "F821": Undefined name
# "E999": syntax error
if any(code.startswith(x) for x in ("F821", SYNTAX_ERROR, "PLE")):
return LintSeverity.ERROR
# "F": PyFlakes Error
# "B": flake8-bugbear Error
# "E": PEP8 "Error"
# "W": PEP8 Warning
# possibly other plugins...
return LintSeverity.WARNING
def format_lint_message(
message: str, code: str, rules: dict[str, str], show_disable: bool
) -> str:
if rules:
message += f".\n{rules.get(code) or ''}"
message += ".\nSee https://beta.ruff.rs/docs/rules/"
if show_disable:
message += f".\n\nTo disable, use ` # noqa: {code}`"
return message
def check_files(
filenames: list[str],
severities: dict[str, LintSeverity],
*,
config: str | None,
retries: int,
timeout: int,
explain: bool,
show_disable: bool,
) -> list[LintMessage]:
try:
proc = run_command(
[
sys.executable,
"-m",
"ruff",
"check",
"--exit-zero",
"--quiet",
"--output-format=json",
*([f"--config={config}"] if config else []),
*filenames,
],
retries=retries,
timeout=timeout,
check=True,
)
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
f"COMMAND (exit code {err.returncode})\n"
f"{' '.join(as_posix(x) for x in err.cmd)}\n\n"
f"STDERR\n{err.stderr.decode('utf-8').strip() or '(empty)'}\n\n"
f"STDOUT\n{err.stdout.decode('utf-8').strip() or '(empty)'}"
)
),
)
]
stdout = str(proc.stdout, "utf-8").strip()
vulnerabilities = json.loads(stdout)
if explain:
all_codes = {v["code"] for v in vulnerabilities}
rules = {code: explain_rule(code) for code in all_codes}
else:
rules = {}
def lint_message(vuln: dict[str, Any]) -> LintMessage:
code = vuln["code"] or SYNTAX_ERROR
return LintMessage(
path=vuln["filename"],
name=code,
description=(
format_lint_message(
vuln["message"],
code,
rules,
show_disable and bool(vuln["code"]),
)
),
line=int(vuln["location"]["row"]),
char=int(vuln["location"]["column"]),
code=LINTER_CODE,
severity=severities.get(code, get_issue_severity(code)),
original=None,
replacement=None,
)
return [lint_message(v) for v in vulnerabilities]
def check_file_for_fixes(
filename: str,
*,
config: str | None,
retries: int,
timeout: int,
) -> list[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
with open(filename, "rb") as f:
proc_fix = run_command(
[
sys.executable,
"-m",
"ruff",
"check",
"--fix-only",
"--exit-zero",
*([f"--config={config}"] if config else []),
"--stdin-filename",
filename,
"-",
],
stdin=f,
retries=retries,
timeout=timeout,
check=True,
)
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
f"COMMAND (exit code {err.returncode})\n"
f"{' '.join(as_posix(x) for x in err.cmd)}\n\n"
f"STDERR\n{err.stderr.decode('utf-8').strip() or '(empty)'}\n\n"
f"STDOUT\n{err.stdout.decode('utf-8').strip() or '(empty)'}"
)
),
)
]
replacement = proc_fix.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
name="format",
description="Run `lintrunner -a` to apply this patch.",
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.WARNING,
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description=f"Ruff linter. Linter code: {LINTER_CODE}. Use with RUFF-FIX to auto-fix issues.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--config",
default=None,
help="Path to the `pyproject.toml` or `ruff.toml` file to use for configuration",
)
parser.add_argument(
"--explain",
action="store_true",
help="Explain a rule",
)
parser.add_argument(
"--show-disable",
action="store_true",
help="Show how to disable a lint message",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="Seconds to wait for ruff",
)
parser.add_argument(
"--severity",
action="append",
help="map code to severity (e.g. `F401:advice`). This option can be used multiple times.",
)
parser.add_argument(
"--no-fix",
action="store_true",
help="Do not suggest fixes",
)
add_default_options(parser)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
severities: dict[str, LintSeverity] = {}
if args.severity:
for severity in args.severity:
parts = severity.split(":", 1)
assert len(parts) == 2, f"invalid severity `{severity}`"
severities[parts[0]] = LintSeverity(parts[1])
lint_messages = check_files(
args.filenames,
severities=severities,
config=args.config,
retries=args.retries,
timeout=args.timeout,
explain=args.explain,
show_disable=args.show_disable,
)
for lint_message in lint_messages:
lint_message.display()
if args.no_fix or not lint_messages:
# If we're not fixing, we can exit early
return
files_with_lints = {lint.path for lint in lint_messages if lint.path is not None}
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(
check_file_for_fixes,
path,
config=args.config,
retries=args.retries,
timeout=args.timeout,
): path
for path in files_with_lints
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
lint_message.display()
except Exception: # Catch all exceptions for lintrunner
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| LintMessage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 221655,
"end": 222135
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteVerifiableDomain"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The ID of the verifiable domain to delete."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteVerifiableDomainInput |
python | numpy__numpy | numpy/_core/tests/test_einsum.py | {
"start": 49619,
"end": 57893
} | class ____:
def build_operands(self, string, size_dict=global_size_dict):
# Builds views based off initial operands
operands = [string]
terms = string.split('->')[0].split(',')
for term in terms:
dims = [size_dict[x] for x in term]
operands.append(np.random.rand(*dims))
return operands
def assert_path_equal(self, comp, benchmark):
# Checks if list of tuples are equivalent
ret = (len(comp) == len(benchmark))
assert_(ret)
for pos in range(len(comp) - 1):
ret &= isinstance(comp[pos + 1], tuple)
ret &= (comp[pos + 1] == benchmark[pos + 1])
assert_(ret)
def test_memory_contraints(self):
# Ensure memory constraints are satisfied
outer_test = self.build_operands('a,b,c->abc')
path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
long_test = self.build_operands('acdf,jbje,gihb,hfac')
path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
def test_long_paths(self):
# Long complex cases
# Long test 1
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
# Long test 2
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
def test_edge_paths(self):
# Difficult edge cases
# Edge test1
edge_test1 = self.build_operands('eb,cb,fb->cef')
path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
# Edge test2
edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
# Edge test3
edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test4
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test5
edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
def test_path_type_input(self):
# Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*path_test, optimize=False)
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*path_test, optimize=True)
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_internal_trace(self):
# gh-20962
path_test = self.build_operands('cab,cdd->ab')
exp_path = ['einsum_path', (1,), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_invalid(self):
path_test = self.build_operands('ab,bc,cd,de->ae')
exp_path = ['einsum_path', (2, 3), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
path_test = self.build_operands('a,a,a->a')
exp_path = ['einsum_path', (1,), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
def test_spaces(self):
# gh-10794
arr = np.array([[1]])
for sp in itertools.product(['', ' '], repeat=4):
# no error for any spacing
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
def test_overlap():
a = np.arange(9, dtype=int).reshape(3, 3)
b = np.arange(9, dtype=int).reshape(3, 3)
d = np.dot(a, b)
# sanity check
c = np.einsum('ij,jk->ik', a, b)
assert_equal(c, d)
# gh-10080, out overlaps one of the operands
c = np.einsum('ij,jk->ik', a, b, out=b)
assert_equal(c, d)
def test_einsum_chunking_precision():
"""Most einsum operations are reductions and until NumPy 2.3 reductions
never (or almost never?) used the `GROWINNER` mechanism to increase the
inner loop size when no buffers are needed.
Because einsum reductions work roughly:
def inner(*inputs, out):
accumulate = 0
for vals in zip(*inputs):
accumulate += prod(vals)
out[0] += accumulate
Calling the inner-loop more often actually improves accuracy slightly
(same effect as pairwise summation but much less).
Without adding pairwise summation to the inner-loop it seems best to just
not use GROWINNER, a quick tests suggest that is maybe 1% slowdown for
the simplest `einsum("i,i->i", x, x)` case.
(It is not clear that we should guarantee precision to this extend.)
"""
num = 1_000_000
value = 1. + np.finfo(np.float64).eps * 8196
res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num
# At with GROWINNER 11 decimals succeed (larger will be less)
assert_almost_equal(res, value, decimal=15)
| TestEinsumPath |
python | zarr-developers__zarr-python | src/zarr/core/group.py | {
"start": 3917,
"end": 12558
} | class ____:
"""
Consolidated Metadata for this Group.
This stores the metadata of child nodes below this group. Any child groups
will have their consolidated metadata set appropriately.
"""
metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata]
kind: Literal["inline"] = "inline"
must_understand: Literal[False] = False
def to_dict(self) -> dict[str, JSON]:
return {
"kind": self.kind,
"must_understand": self.must_understand,
"metadata": {
k: v.to_dict()
for k, v in sorted(
self.flattened_metadata.items(),
key=lambda item: (
item[0].count("/"),
unicodedata.normalize("NFKC", item[0]).casefold(),
),
)
},
}
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> ConsolidatedMetadata:
data = dict(data)
kind = data.get("kind")
if kind != "inline":
raise ValueError(f"Consolidated metadata kind='{kind}' is not supported.")
raw_metadata = data.get("metadata")
if not isinstance(raw_metadata, dict):
raise TypeError(f"Unexpected type for 'metadata': {type(raw_metadata)}")
metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] = {}
if raw_metadata:
for k, v in raw_metadata.items():
if not isinstance(v, dict):
raise TypeError(
f"Invalid value for metadata items. key='{k}', type='{type(v).__name__}'"
)
# zarr_format is present in v2 and v3.
zarr_format = parse_zarr_format(v["zarr_format"])
if zarr_format == 3:
node_type = parse_node_type(v.get("node_type", None))
if node_type == "group":
metadata[k] = GroupMetadata.from_dict(v)
elif node_type == "array":
metadata[k] = ArrayV3Metadata.from_dict(v)
else:
assert_never(node_type)
elif zarr_format == 2:
if "shape" in v:
metadata[k] = ArrayV2Metadata.from_dict(v)
else:
metadata[k] = GroupMetadata.from_dict(v)
else:
assert_never(zarr_format)
cls._flat_to_nested(metadata)
return cls(metadata=metadata)
@staticmethod
def _flat_to_nested(
metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata],
) -> None:
"""
Convert a flat metadata representation to a nested one.
Notes
-----
Flat metadata is used when persisting the consolidated metadata. The keys
include the full path, not just the node name. The key prefixes can be
used to determine which nodes are children of which other nodes.
Nested metadata is used in-memory. The outermost level will only have the
*immediate* children of the Group. All nested child groups will be stored
under the consolidated metadata of their immediate parent.
"""
# We have a flat mapping from {k: v} where the keys include the *full*
# path segment:
# {
# "/a/b": { group_metadata },
# "/a/b/array-0": { array_metadata },
# "/a/b/array-1": { array_metadata },
# }
#
# We want to reorganize the metadata such that each Group contains the
# array metadata of its immediate children.
# In the example, the group at `/a/b` will have consolidated metadata
# for its children `array-0` and `array-1`.
#
# metadata = dict(metadata)
keys = sorted(metadata, key=lambda k: k.count("/"))
grouped = {
k: list(v) for k, v in itertools.groupby(keys, key=lambda k: k.rsplit("/", 1)[0])
}
# we go top down and directly manipulate metadata.
for key, children_keys in grouped.items():
# key is a key like "a", "a/b", "a/b/c"
# The basic idea is to find the immediate parent (so "", "a", or "a/b")
# and update that node's consolidated metadata to include the metadata
# in children_keys
*prefixes, name = key.split("/")
parent = metadata
while prefixes:
# e.g. a/b/c has a parent "a/b". Walk through to get
# metadata["a"]["b"]
part = prefixes.pop(0)
# we can assume that parent[part] here is a group
# otherwise we wouldn't have a node with this `part` prefix.
# We can also assume that the parent node will have consolidated metadata,
# because we're walking top to bottom.
parent = parent[part].consolidated_metadata.metadata # type: ignore[union-attr]
node = parent[name]
children_keys = list(children_keys)
if isinstance(node, ArrayV2Metadata | ArrayV3Metadata):
# These are already present, either thanks to being an array in the
# root, or by being collected as a child in the else clause
continue
children_keys = list(children_keys)
# We pop from metadata, since we're *moving* this under group
children = {
child_key.split("/")[-1]: metadata.pop(child_key)
for child_key in children_keys
if child_key != key
}
parent[name] = replace(
node, consolidated_metadata=ConsolidatedMetadata(metadata=children)
)
@property
def flattened_metadata(self) -> dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata]:
"""
Return the flattened representation of Consolidated Metadata.
The returned dictionary will have a key for each child node in the hierarchy
under this group. Under the default (nested) representation available through
``self.metadata``, the dictionary only contains keys for immediate children.
The keys of the dictionary will include the full path to a child node from
the current group, where segments are joined by ``/``.
Examples
--------
```python
from zarr.core.group import ConsolidatedMetadata, GroupMetadata
cm = ConsolidatedMetadata(
metadata={
"group-0": GroupMetadata(
consolidated_metadata=ConsolidatedMetadata(
{
"group-0-0": GroupMetadata(),
}
)
),
"group-1": GroupMetadata(),
}
)
# {'group-0': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group'),
# 'group-0/group-0-0': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group'),
# 'group-1': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group')}
```
"""
metadata = {}
def flatten(
key: str, group: GroupMetadata | ArrayV2Metadata | ArrayV3Metadata
) -> dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata]:
children: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] = {}
if isinstance(group, ArrayV2Metadata | ArrayV3Metadata):
children[key] = group
else:
if group.consolidated_metadata and group.consolidated_metadata.metadata is not None:
children[key] = replace(
group, consolidated_metadata=ConsolidatedMetadata(metadata={})
)
for name, val in group.consolidated_metadata.metadata.items():
full_key = f"{key}/{name}"
if isinstance(val, GroupMetadata):
children.update(flatten(full_key, val))
else:
children[full_key] = val
else:
children[key] = replace(group, consolidated_metadata=None)
return children
for k, v in self.metadata.items():
metadata.update(flatten(k, v))
return metadata
@dataclass(frozen=True)
| ConsolidatedMetadata |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 24923,
"end": 27807
} | class ____(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
high_res_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, image_size, image_size)`, *optional*):
The predicted masks, upscaled to the original image size. Only used for Sam2VideoModel.
object_pointer (`torch.FloatTensor` of shape `(batch_size, point_batch_size, hidden_size)`, *optional*):
A tensor representing the object pointer, used for tracking in videos. Only used for Sam2VideoModel.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
object_score_logits: Optional[torch.FloatTensor] = None
image_embeddings: tuple[torch.FloatTensor, ...] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
high_res_masks: Optional[torch.FloatTensor] = None
object_pointer: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
| Sam2VideoImageSegmentationOutput |
python | lazyprogrammer__machine_learning_examples | rl2/a3c/worker.py | {
"start": 416,
"end": 3167
class ImageTransformer:
    """
    TensorFlow graph that preprocesses a raw 210x160x3 Atari RGB frame:
    convert to grayscale, crop away the score area, nearest-neighbor
    resize to 84x84, and drop the trailing channel dimension.
    """
    def __init__(self):
        with tf.variable_scope("image_transformer"):
            self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)
            gray = tf.image.rgb_to_grayscale(self.input_state)
            cropped = tf.image.crop_to_bounding_box(gray, 34, 0, 160, 160)
            resized = tf.image.resize_images(
                cropped,
                [84, 84],
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            self.output = tf.squeeze(resized)

    def transform(self, state, sess=None):
        """Run the preprocessing graph on one frame, returning an 84x84 array."""
        sess = sess or tf.get_default_session()
        return sess.run(self.output, {self.input_state: state})
def repeat_frame(frame):
    """Build the initial state by stacking 4 copies of `frame` along a new last axis."""
    return np.repeat(frame[:, :, np.newaxis], 4, axis=2)
def shift_frames(state, next_frame):
    """Slide the frame window: drop the oldest frame (index 0 on the last
    axis) and append `next_frame` as the newest one."""
    newest = np.expand_dims(next_frame, 2)
    return np.concatenate((state[:, :, 1:], newest), axis=2)
def get_copy_params_op(src_vars, dst_vars):
    """Return TF assign ops copying each variable in `src_vars` onto the
    name-wise corresponding variable in `dst_vars`.

    Both lists are sorted by variable name so variables pair up by their
    position within their respective scopes.
    """
    ordered = zip(sorted(src_vars, key=lambda v: v.name),
                  sorted(dst_vars, key=lambda v: v.name))
    return [d.assign(s) for s, d in ordered]
def make_train_op(local_net, global_net):
    """
    Build an op that applies the *local* network's gradients to the
    *global* network's variables.

    The optimizer wants (gradient, variable) pairs; here the gradients are
    taken from the local net while the variables come from the global net,
    so one worker's experience updates the shared parameters.
    """
    # Gradients computed on the local network.
    local_grads, _ = zip(*local_net.grads_and_vars)
    # Clip to keep updates bounded.
    clipped_grads, _ = tf.clip_by_global_norm(local_grads, 5.0)
    # Variables owned by the global network.
    _, global_vars = zip(*global_net.grads_and_vars)
    # One gradient-descent step: var <- var - lr * grad.
    return global_net.optimizer.apply_gradients(
        list(zip(clipped_grads, global_vars)),
        global_step=tf.train.get_global_step())
# Worker object to be run in a thread
# name (String) should be unique for each thread
# env (OpenAI Gym Environment) should be unique for each thread
# policy_net (PolicyNetwork) should be a global passed to every worker
# value_net (ValueNetwork) should be a global passed to every worker
# returns_list (List) should be a global passed to every worker
| ImageTransformer |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/completions.py | {
"start": 1042,
"end": 18285
class Completions(SyncAPIResource):
    """[Legacy] Synchronous resource for the Text Completions API.

    Text Completions is a legacy API; the Messages API is recommended for
    new code. Exposes a single ``create`` method (overloaded for streaming
    and non-streaming use) plus raw/streaming response wrappers.
    """

    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: Literal[True],
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        # Completions can run long: when the caller kept the client's default
        # timeout, bump this request's timeout to 10 minutes.
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        # Beta opt-ins are sent as a comma-separated `anthropic-beta` header;
        # explicit extra_headers entries win over the generated one.
        extra_headers = {
            **strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
            **(extra_headers or {}),
        }
        return self._post(
            "/v1/complete",
            body=maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )
| Completions |
python | sympy__sympy | sympy/holonomic/holonomic.py | {
"start": 12084,
"end": 91767
} | class ____:
r"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator ``L`` such that :math:`L.f = 0`. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
Explanation
===========
Holonomic functions have closure properties and thus forms a ring.
Given two Holonomic Functions f and g, their sum, product,
integral and derivative is also a Holonomic Function.
For ordinary points initial condition should be a vector of values of
the derivatives i.e. :math:`[y(x_0), y'(x_0), y''(x_0) ... ]`.
For regular singular points initial conditions can also be provided in this
format:
:math:`{s0: [C_0, C_1, ...], s1: [C^1_0, C^1_1, ...], ...}`
where s0, s1, ... are the roots of indicial equation and vectors
:math:`[C_0, C_1, ...], [C^0_0, C^0_1, ...], ...` are the corresponding initial
terms of the associated power series. See Examples below.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy import QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)*Dx + (-1)*Dx**2 + (1)*Dx**3, x, 0, [1, 2, 1])
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x, 0, [0, 1])
An example of initial conditions for regular singular points,
the indicial equation has only one root `1/2`.
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)*Dx, x, 0, {1/2: [1]})
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
>>> import sympy.holonomic # doctest: +SKIP
>>> from sympy import var, sin # doctest: +SKIP
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> import numpy as np # doctest: +SKIP
>>> var("x") # doctest: +SKIP
>>> r = np.linspace(1, 5, 100) # doctest: +SKIP
>>> y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r) # doctest: +SKIP
>>> plt.plot(r, y, label="holonomic function") # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Parameters
==========
annihilator:
Annihilator of the Holonomic Function, represented by a
`DifferentialOperator` object.
x:
Variable of the function.
x0:
The point at which initial conditions are stored.
Generally an integer.
y0:
The initial condition. The proper format for the initial condition
is described in class docstring. To make the function unique,
length of the vector `y0` should be equal to or greater than the
order of differential equation.
"""
# initial condition
self.y0 = y0
# the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __str__(self):
if self._have_init_cond():
str_sol = 'HolonomicFunction(%s, %s, %s, %s)' % (str(self.annihilator),\
sstr(self.x), sstr(self.x0), sstr(self.y0))
else:
str_sol = 'HolonomicFunction(%s, %s)' % (str(self.annihilator),\
sstr(self.x))
return str_sol
__repr__ = __str__
    def unify(self, other):
        """
        Unifies the base polynomial ring of a given two Holonomic
        Functions.

        Returns a pair of equivalent HolonomicFunctions whose annihilators
        live in a common ring over the unified ground domain.
        """
        R1 = self.annihilator.parent.base
        R2 = other.annihilator.parent.base

        dom1 = R1.dom
        dom2 = R2.dom

        # nothing to do when both already share the same base ring
        if R1 == R2:
            return (self, other)

        # polynomial ring over the unified ground domain
        R = (dom1.unify(dom2)).old_poly_ring(self.x)

        newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))

        # re-express both annihilators' coefficient lists in the new ring
        sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
        sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]

        sol1 = DifferentialOperator(sol1, newparent)
        sol2 = DifferentialOperator(sol2, newparent)

        # repackage with the original initial conditions untouched
        sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
        sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)

        return (sol1, sol2)
def is_singularics(self):
"""
Returns True if the function have singular initial condition
in the dictionary format.
Returns False if the function have ordinary initial condition
in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
Checks if the function have initial condition.
"""
return bool(self.y0)
    def _singularics_to_ord(self):
        """
        Converts a singular initial condition to ordinary if possible.

        Possible only when the condition dict holds a single root that is a
        positive integer; returns None (implicitly) otherwise.
        """
        a = list(self.y0)[0]
        b = self.y0[a]

        if len(self.y0) == 1 and a == int(a) and a > 0:
            a = int(a)
            # the first `a` derivatives vanish since the series starts at x**a
            y0 = [S.Zero] * a
            # series coefficient c_i corresponds to y^(a+i)(x0) = c_i * (a+i)!
            y0 += [j * factorial(a + i) for i, j in enumerate(b)]

            return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
    def __add__(self, other):
        """
        Closure under addition: compute an annihilator of ``self + other``
        via a linear-algebra ansatz, then combine initial conditions when
        both operands carry compatible ones.
        """
        # if the ground domains are different
        if self.annihilator.parent.base != other.annihilator.parent.base:
            a, b = self.unify(other)
            return a + b

        deg1 = self.annihilator.order
        deg2 = other.annihilator.order
        dim = max(deg1, deg2)

        R = self.annihilator.parent.base
        K = R.get_field()

        rowsself = [self.annihilator]
        rowsother = [other.annihilator]
        gen = self.annihilator.parent.derivative_operator

        # constructing annihilators up to order dim
        for i in range(dim - deg1):
            diff1 = (gen * rowsself[-1])
            rowsself.append(diff1)

        for i in range(dim - deg2):
            diff2 = (gen * rowsother[-1])
            rowsother.append(diff2)

        row = rowsself + rowsother

        # constructing the matrix of the ansatz
        r = []

        for expr in row:
            p = []
            for i in range(dim + 1):
                if i >= len(expr.listofpoly):
                    p.append(K.zero)
                else:
                    p.append(K.new(expr.listofpoly[i].to_list()))
            r.append(p)

        # solving the linear system using gauss jordan solver
        r = DomainMatrix(r, (len(row), dim+1), K).transpose()
        homosys = DomainMatrix.zeros((dim+1, 1), K)
        sol = _find_nonzero_solution(r, homosys)

        # if a solution is not obtained then increasing the order by 1 in each
        # iteration
        while sol.is_zero_matrix:
            dim += 1

            diff1 = (gen * rowsself[-1])
            rowsself.append(diff1)

            diff2 = (gen * rowsother[-1])
            rowsother.append(diff2)

            row = rowsself + rowsother
            r = []

            for expr in row:
                p = []
                for i in range(dim + 1):
                    if i >= len(expr.listofpoly):
                        p.append(K.zero)
                    else:
                        p.append(K.new(expr.listofpoly[i].to_list()))
                r.append(p)

            # solving the linear system using gauss jordan solver
            r = DomainMatrix(r, (len(row), dim+1), K).transpose()
            homosys = DomainMatrix.zeros((dim+1, 1), K)
            sol = _find_nonzero_solution(r, homosys)

        # taking only the coefficients needed to multiply with `self`
        # can be also be done the other way by taking R.H.S and multiplying with
        # `other`
        sol = sol.flat()[:dim + 1 - deg1]
        sol1 = _normalize(sol, self.annihilator.parent)
        # annihilator of the solution
        sol = sol1 * (self.annihilator)
        sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)

        if not (self._have_init_cond() and other._have_init_cond()):
            return HolonomicFunction(sol, self.x)

        # both the functions have ordinary initial conditions
        if self.is_singularics() == False and other.is_singularics() == False:

            # directly add the corresponding value
            if self.x0 == other.x0:
                # try to extended the initial conditions
                # using the annihilator
                y1 = _extend_y0(self, sol.order)
                y2 = _extend_y0(other, sol.order)
                y0 = [a + b for a, b in zip(y1, y2)]
                return HolonomicFunction(sol, self.x, self.x0, y0)

            # change the initial conditions to a same point
            selfat0 = self.annihilator.is_singular(0)
            otherat0 = other.annihilator.is_singular(0)

            if self.x0 == 0 and not selfat0 and not otherat0:
                return self + other.change_ics(0)

            if other.x0 == 0 and not selfat0 and not otherat0:
                return self.change_ics(0) + other

            selfatx0 = self.annihilator.is_singular(self.x0)
            otheratx0 = other.annihilator.is_singular(self.x0)

            if not selfatx0 and not otheratx0:
                return self + other.change_ics(self.x0)

            return self.change_ics(other.x0) + other

        if self.x0 != other.x0:
            return HolonomicFunction(sol, self.x)

        # if the functions have singular_ics
        y1 = None
        y2 = None

        if self.is_singularics() == False and other.is_singularics() == True:
            # convert the ordinary initial condition to singular.
            _y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
            y1 = {S.Zero: _y0}
            y2 = other.y0
        elif self.is_singularics() == True and other.is_singularics() == False:
            _y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
            y1 = self.y0
            y2 = {S.Zero: _y0}
        elif self.is_singularics() == True and other.is_singularics() == True:
            y1 = self.y0
            y2 = other.y0

        # computing singular initial condition for the result
        # taking union of the series terms of both functions
        y0 = {}
        for i in y1:
            # add corresponding initial terms if the power
            # on `x` is same
            if i in y2:
                y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
            else:
                y0[i] = y1[i]

        for i in y2:
            if i not in y1:
                y0[i] = y2[i]

        return HolonomicFunction(sol, self.x, self.x0, y0)
    def integrate(self, limits, initcond=False):
        """
        Integrates the given holonomic function.

        ``limits`` may be a tuple ``(x, a, b)`` for definite integration;
        anything non-iterable gives the indefinite integral. When
        ``initcond`` is True and no initial conditions exist, a zero
        condition is attached to the result.

        Examples
        ========

        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
        HolonomicFunction((-1)*Dx + (1)*Dx**2, x, 0, [0, 1])
        >>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
        HolonomicFunction((1)*Dx + (1)*Dx**3, x, 0, [0, 1, 0])
        """
        # to get the annihilator, just multiply by Dx from right
        D = self.annihilator.parent.derivative_operator

        # if the function have initial conditions of the series format
        if self.is_singularics() == True:

            r = self._singularics_to_ord()
            if r:
                return r.integrate(limits, initcond=initcond)

            # computing singular initial condition for the function
            # produced after integration.
            y0 = {}
            for i in self.y0:
                c = self.y0[i]
                c2 = []
                for j, cj in enumerate(c):
                    if cj == 0:
                        c2.append(S.Zero)

                    # if power on `x` is -1, the integration becomes log(x)
                    # TODO: Implement this case
                    elif i + j + 1 == 0:
                        raise NotImplementedError("logarithmic terms in the series are not supported")
                    else:
                        c2.append(cj / S(i + j + 1))
                y0[i + 1] = c2

            if hasattr(limits, "__iter__"):
                raise NotImplementedError("Definite integration for singular initial conditions")

            return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)

        # if no initial conditions are available for the function
        if not self._have_init_cond():
            if initcond:
                return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S.Zero])

            return HolonomicFunction(self.annihilator * D, self.x)

        # definite integral
        # initial conditions for the answer will be stored at point `a`,
        # where `a` is the lower limit of the integrand
        if hasattr(limits, "__iter__"):

            if len(limits) == 3 and limits[0] == self.x:
                x0 = self.x0
                a = limits[1]
                b = limits[2]
                definite = True

        else:
            definite = False

        # indefinite integral picks up one extra (zero) initial value
        y0 = [S.Zero]
        y0 += self.y0

        indefinite_integral = HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)

        if not definite:
            return indefinite_integral

        # use evalf to get the values at `a`
        if x0 != a:
            try:
                indefinite_expr = indefinite_integral.to_expr()
            except (NotHyperSeriesError, NotPowerSeriesError):
                indefinite_expr = None

            if indefinite_expr:
                lower = indefinite_expr.subs(self.x, a)
                if isinstance(lower, NaN):
                    lower = indefinite_expr.limit(self.x, a)
            else:
                lower = indefinite_integral.evalf(a)

            if b == self.x:
                y0[0] = y0[0] - lower
                return HolonomicFunction(self.annihilator * D, self.x, x0, y0)

            elif S(b).is_Number:
                if indefinite_expr:
                    upper = indefinite_expr.subs(self.x, b)
                    if isinstance(upper, NaN):
                        upper = indefinite_expr.limit(self.x, b)
                else:
                    upper = indefinite_integral.evalf(b)

                return upper - lower

        # if the upper limit is `x`, the answer will be a function
        if b == self.x:
            return HolonomicFunction(self.annihilator * D, self.x, a, y0)

        # if the upper limits is a Number, a numerical value will be returned
        elif S(b).is_Number:
            try:
                s = HolonomicFunction(self.annihilator * D, self.x, a,\
                    y0).to_expr()
                indefinite = s.subs(self.x, b)
                if not isinstance(indefinite, NaN):
                    return indefinite
                else:
                    return s.limit(self.x, b)
            except (NotHyperSeriesError, NotPowerSeriesError):
                return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)

        return HolonomicFunction(self.annihilator * D, self.x)
    def diff(self, *args, **kwargs):
        r"""
        Differentiation of the given Holonomic function.

        ``args`` may name the differentiation variable (anything other than
        ``self.x`` yields 0) and optionally a repetition count.

        Examples
        ========

        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import ZZ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
        cos(x)
        >>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
        2*exp(2*x)

        See Also
        ========

        integrate
        """
        kwargs.setdefault('evaluate', True)
        if args:
            if args[0] != self.x:
                return S.Zero
            elif len(args) == 2:
                # repeated differentiation: apply diff `args[1]` times
                sol = self
                for i in range(args[1]):
                    sol = sol.diff(args[0])
                return sol

        ann = self.annihilator

        # if the function is constant.
        if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
            return S.Zero

        # if the coefficient of y in the differential equation is zero.
        # a shifting is done to compute the answer in this case.
        elif ann.listofpoly[0] == ann.parent.base.zero:

            sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)

            if self._have_init_cond():
                # if ordinary initial condition
                if self.is_singularics() == False:
                    return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
                # TODO: support for singular initial condition
                return HolonomicFunction(sol, self.x)
            else:
                return HolonomicFunction(sol, self.x)

        # the general algorithm
        R = ann.parent.base
        K = R.get_field()

        seq_dmf = [K.new(i.to_list()) for i in ann.listofpoly]

        # -y = a1*y'/a0 + a2*y''/a0 ... + an*y^n/a0
        rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
        rhs.insert(0, K.zero)

        # differentiate both lhs and rhs
        sol = _derivate_diff_eq(rhs, K)

        # add the term y' in lhs to rhs
        sol = _add_lists(sol, [K.zero, K.one])

        sol = _normalize(sol[1:], self.annihilator.parent, negative=False)

        if not self._have_init_cond() or self.is_singularics() == True:
            return HolonomicFunction(sol, self.x)

        y0 = _extend_y0(self, sol.order + 1)[1:]
        return HolonomicFunction(sol, self.x, self.x0, y0)
def __eq__(self, other):
if self.annihilator != other.annihilator or self.x != other.x:
return False
if self._have_init_cond() and other._have_init_cond():
return self.x0 == other.x0 and self.y0 == other.y0
return True
    def __mul__(self, other):
        """
        Multiply ``self`` by ``other``.
        If ``other`` is not holonomic it must be a scalar expression not
        depending on ``self.x``; the annihilator is kept and only the
        initial conditions are scaled.  If both factors are holonomic, an
        annihilator for the product is found by expressing successive
        derivatives of ``f*g`` in the finite basis ``Dx^i(f)*Dx^j(g)``
        and searching for a linear dependency of increasing order.
        Initial conditions of the product come from the Leibniz rule
        (ordinary conditions) or from termwise multiplication of the
        series (singular conditions).
        """
        ann_self = self.annihilator
        if not isinstance(other, HolonomicFunction):
            other = sympify(other)
            if other.has(self.x):
                raise NotImplementedError(" Can't multiply a HolonomicFunction and expressions/functions.")
            if not self._have_init_cond():
                return self
            # scaling by a constant keeps the annihilator; only y0 changes
            y0 = _extend_y0(self, ann_self.order)
            y1 = [(Poly.new(j, self.x) * other).rep for j in y0]
            return HolonomicFunction(ann_self, self.x, self.x0, y1)
        if self.annihilator.parent.base != other.annihilator.parent.base:
            # bring both annihilators over a common ground ring first
            a, b = self.unify(other)
            return a * b
        ann_other = other.annihilator
        a = ann_self.order
        b = ann_other.order
        R = ann_self.parent.base
        K = R.get_field()
        list_self = [K.new(j.to_list()) for j in ann_self.listofpoly]
        list_other = [K.new(j.to_list()) for j in ann_other.listofpoly]
        # will be used to reduce the degree
        self_red = [-list_self[i] / list_self[a] for i in range(a)]
        other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mull[i][j] is the coefficient of Dx^i(f).Dx^j(g)
        coeff_mul = [[K.zero for i in range(b + 1)] for j in range(a + 1)]
        coeff_mul[0][0] = K.one
        # making the ansatz
        lin_sys_elements = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
        lin_sys = DomainMatrix(lin_sys_elements, (1, a*b), K).transpose()
        homo_sys = DomainMatrix.zeros((a*b, 1), K)
        sol = _find_nonzero_solution(lin_sys, homo_sys)
        # until a non trivial solution is found
        while sol.is_zero_matrix:
            # updating the coefficients Dx^i(f).Dx^j(g) for next degree
            for i in range(a - 1, -1, -1):
                for j in range(b - 1, -1, -1):
                    coeff_mul[i][j + 1] += coeff_mul[i][j]
                    coeff_mul[i + 1][j] += coeff_mul[i][j]
                    if isinstance(coeff_mul[i][j], K.dtype):
                        coeff_mul[i][j] = DMFdiff(coeff_mul[i][j], K)
                    else:
                        coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
            # reduce the terms to lower power using annihilators of f, g
            for i in range(a + 1):
                if coeff_mul[i][b].is_zero:
                    continue
                for j in range(b):
                    coeff_mul[i][j] += other_red[j] * coeff_mul[i][b]
                coeff_mul[i][b] = K.zero
            # not d2 + 1, as that is already covered in previous loop
            for j in range(b):
                if coeff_mul[a][j] == 0:
                    continue
                for i in range(a):
                    coeff_mul[i][j] += self_red[i] * coeff_mul[a][j]
                coeff_mul[a][j] = K.zero
            lin_sys_elements.append([coeff_mul[i][j] for i in range(a) for j in range(b)])
            lin_sys = DomainMatrix(lin_sys_elements, (len(lin_sys_elements), a*b), K).transpose()
            sol = _find_nonzero_solution(lin_sys, homo_sys)
        sol_ann = _normalize(sol.flat(), self.annihilator.parent, negative=False)
        if not (self._have_init_cond() and other._have_init_cond()):
            return HolonomicFunction(sol_ann, self.x)
        if self.is_singularics() == False and other.is_singularics() == False:
            # if both the conditions are at same point
            if self.x0 == other.x0:
                # try to find more initial conditions
                y0_self = _extend_y0(self, sol_ann.order)
                y0_other = _extend_y0(other, sol_ann.order)
                # h(x0) = f(x0) * g(x0)
                y0 = [y0_self[0] * y0_other[0]]
                # coefficient of Dx^j(f)*Dx^i(g) in Dx^i(fg)
                # (Leibniz rule: the binomial coefficients)
                for i in range(1, min(len(y0_self), len(y0_other))):
                    coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
                    for j in range(i + 1):
                        for k in range(i + 1):
                            if j + k == i:
                                coeff[j][k] = binomial(i, j)
                    sol = 0
                    for j in range(i + 1):
                        for k in range(i + 1):
                            sol += coeff[j][k]* y0_self[j] * y0_other[k]
                    y0.append(sol)
                return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the points are different, consider one
            selfat0 = self.annihilator.is_singular(0)
            otherat0 = other.annihilator.is_singular(0)
            if self.x0 == 0 and not selfat0 and not otherat0:
                return self * other.change_ics(0)
            if other.x0 == 0 and not selfat0 and not otherat0:
                return self.change_ics(0) * other
            selfatx0 = self.annihilator.is_singular(self.x0)
            otheratx0 = other.annihilator.is_singular(self.x0)
            if not selfatx0 and not otheratx0:
                return self * other.change_ics(self.x0)
            return self.change_ics(other.x0) * other
        if self.x0 != other.x0:
            return HolonomicFunction(sol_ann, self.x)
        # if the functions have singular_ics
        y1 = None
        y2 = None
        if self.is_singularics() == False and other.is_singularics() == True:
            # convert the ordinary conditions into series coefficients
            _y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
            y1 = {S.Zero: _y0}
            y2 = other.y0
        elif self.is_singularics() == True and other.is_singularics() == False:
            _y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
            y1 = self.y0
            y2 = {S.Zero: _y0}
        elif self.is_singularics() == True and other.is_singularics() == True:
            y1 = self.y0
            y2 = other.y0
        y0 = {}
        # multiply every possible pair of the series terms
        for i in y1:
            for j in y2:
                k = min(len(y1[i]), len(y2[j]))
                # Cauchy product of the two truncated series
                c = [sum((y1[i][b] * y2[j][a - b] for b in range(a + 1)),
                         start=S.Zero) for a in range(k)]
                if not i + j in y0:
                    y0[i + j] = c
                else:
                    y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
        return HolonomicFunction(sol_ann, self.x, self.x0, y0)
    # reflected multiplication (scalar * f) reuses the same implementation
    __rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __truediv__(self, other):
return self * (S.One / other)
    def __pow__(self, n):
        """
        Raise the holonomic function to the integer power ``n``.
        For annihilators of order <= 1, ``p0 + p1*Dx``, the power is
        written down directly: ``(f**n)' = n*f**(n-1)*f'`` implies the
        annihilator ``n*p0 + p1*Dx``.  Otherwise binary exponentiation
        with ``__mul__`` is used; a negative exponent raises
        ``NotHolonomicError`` since ``f**-1`` is not holonomic in general.
        """
        if self.annihilator.order <= 1:
            ann = self.annihilator
            parent = ann.parent
            if self.y0 is None:
                y0 = None
            else:
                # the initial value transforms as f(x0)**n
                y0 = [list(self.y0)[0] ** n]
            p0 = ann.listofpoly[0]
            p1 = ann.listofpoly[1]
            # annihilator of f**n is n*p0 + p1*Dx
            p0 = (Poly.new(p0, self.x) * n).rep
            sol = [parent.base.to_sympy(i) for i in [p0, p1]]
            dd = DifferentialOperator(sol, parent)
            return HolonomicFunction(dd, self.x, self.x0, y0)
        if n < 0:
            raise NotHolonomicError("Negative Power on a Holonomic Function")
        # start from the constant function 1 (annihilated by Dx, value 1 at 0)
        Dx = self.annihilator.parent.derivative_operator
        result = HolonomicFunction(Dx, self.x, S.Zero, [S.One])
        if n == 0:
            return result
        # exponentiation by repeated squaring
        x = self
        while True:
            if n % 2:
                result *= x
            n >>= 1
            if not n:
                break
            x *= x
        return result
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
"""
return max(i.degree() for i in self.annihilator.listofpoly)
def composition(self, expr, *args, **kwargs):
"""
Returns function after composition of a holonomic
function with an algebraic function. The method cannot compute
initial conditions for the result by itself, so they can be also be
provided.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)*Dx, x, 0, [1])
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)*Dx + (x)*Dx**2, x, 1, [1, 0])
See Also
========
from_hyper
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
coeffs = [S.Zero for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S.One
system = [coeffs]
homogeneous = Matrix([[S.Zero for i in range(a)]]).transpose()
while True:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol, taus = (Matrix(system).transpose()
).gauss_jordan_solve(homogeneous)
if sol.is_zero_matrix is not True:
break
tau = list(taus)[0]
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
    def to_sequence(self, lb=True):
        r"""
        Finds recurrence relation for the coefficients in the series expansion
        of the function about :math:`x_0`, where :math:`x_0` is the point at
        which the initial condition is stored.
        Explanation
        ===========
        If the point :math:`x_0` is ordinary, solution of the form :math:`[(R, n_0)]`
        is returned. Where :math:`R` is the recurrence relation and :math:`n_0` is the
        smallest ``n`` for which the recurrence holds true.
        If the point :math:`x_0` is regular singular, a list of solutions in
        the format :math:`(R, p, n_0)` is returned, i.e. `[(R, p, n_0), ... ]`.
        Each tuple in this vector represents a recurrence relation :math:`R`
        associated with a root of the indicial equation ``p``. Conditions of
        a different format can also be provided in this case, see the
        docstring of HolonomicFunction class.
        If it's not possible to numerically compute a initial condition,
        it is returned as a symbol :math:`C_j`, denoting the coefficient of
        :math:`(x - x_0)^j` in the power series about :math:`x_0`.
        Examples
        ========
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import QQ
        >>> from sympy import symbols, S
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
        [(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
        >>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
        [(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
        >>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
        [(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
        See Also
        ========
        HolonomicFunction.series
        References
        ==========
        .. [1] https://hal.inria.fr/inria-00070025/document
        .. [2] https://www3.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
        """
        # work about the origin: shift the function if x0 != 0
        if self.x0 != 0:
            return self.shift_x(self.x0).to_sequence()
        # check whether a power series exists if the point is singular
        if self.annihilator.is_singular(self.x0):
            return self._frobenius(lb=lb)
        dict1 = {}
        n = Symbol('n', integer=True)
        dom = self.annihilator.parent.base.dom
        R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
        # substituting each term of the form `x^k Dx^j` in the
        # annihilator, according to the formula below:
        # x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
        # for explanation see [2].
        for i, j in enumerate(self.annihilator.listofpoly):
            listofdmp = j.all_coeffs()
            degree = len(listofdmp) - 1
            for k in range(degree + 1):
                coeff = listofdmp[degree - k]
                if coeff == 0:
                    continue
                # dict1 maps the shift (i - k) to the polynomial in n
                # that multiplies u(n + i - k)
                if (i - k, k) in dict1:
                    dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
                else:
                    dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
        sol = []
        keylist = [i[0] for i in dict1]
        lower = min(keylist)
        upper = max(keylist)
        degree = self.degree()
        # the recurrence relation holds for all values of
        # n greater than smallest_n, i.e. n >= smallest_n
        smallest_n = lower + degree
        dummys = {}
        eqs = []
        unknowns = []
        # an appropriate shift of the recurrence
        for j in range(lower, upper + 1):
            if j in keylist:
                temp = sum((v.subs(n, n - lower)
                            for k, v in dict1.items() if k[0] == j),
                           start=S.Zero)
                sol.append(temp)
            else:
                sol.append(S.Zero)
        # the recurrence relation
        sol = RecurrenceOperator(sol, R)
        # computing the initial conditions for recurrence
        order = sol.order
        # integer roots of the leading coefficient: the recurrence cannot
        # determine u(n) there, so conditions are needed past the largest one
        all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
        all_roots = all_roots.keys()
        if all_roots:
            max_root = max(all_roots) + 1
            smallest_n = max(max_root, smallest_n)
        order += smallest_n
        y0 = _extend_y0(self, order)
        # u(n) = y^n(0)/factorial(n)
        u0 = [j / factorial(i) for i, j in enumerate(y0)]
        # if sufficient conditions can't be computed then
        # try to use the series method i.e.
        # equate the coefficients of x^k in the equation formed by
        # substituting the series in differential equation, to zero.
        if len(u0) < order:
            for i in range(degree):
                eq = S.Zero
                for j in dict1:
                    if i + j[0] < 0:
                        dummys[i + j[0]] = S.Zero
                    elif i + j[0] < len(u0):
                        dummys[i + j[0]] = u0[i + j[0]]
                    elif not i + j[0] in dummys:
                        dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
                        unknowns.append(dummys[i + j[0]])
                    if j[1] <= i:
                        eq += dict1[j].subs(n, i) * dummys[i + j[0]]
                eqs.append(eq)
            # solve the system of equations formed
            soleqs = solve(eqs, *unknowns)
            if isinstance(soleqs, dict):
                for i in range(len(u0), order):
                    if i not in dummys:
                        dummys[i] = Symbol('C_%s' %i)
                    if dummys[i] in soleqs:
                        u0.append(soleqs[dummys[i]])
                    else:
                        # unsolved coefficients stay symbolic (C_j)
                        u0.append(dummys[i])
                if lb:
                    return [(HolonomicSequence(sol, u0), smallest_n)]
                return [HolonomicSequence(sol, u0)]
            # solve() returned a list of solution dicts; scan them
            for i in range(len(u0), order):
                if i not in dummys:
                    dummys[i] = Symbol('C_%s' %i)
                s = False
                for j in soleqs:
                    if dummys[i] in j:
                        u0.append(j[dummys[i]])
                        s = True
                if not s:
                    u0.append(dummys[i])
        if lb:
            return [(HolonomicSequence(sol, u0), smallest_n)]
        return [HolonomicSequence(sol, u0)]
    def _frobenius(self, lb=True):
        """
        Frobenius method: compute recurrence relations for the series
        coefficients about a regular singular point.  For each root ``p``
        of the indicial equation that has to be considered, a recurrence
        for the coefficients of the series ``x**p * Sum(u_n * x**n)`` is
        produced.  Helper for :meth:`to_sequence`; returns a list of
        ``(HolonomicSequence, p, smallest_n)`` tuples, or
        ``(HolonomicSequence, p)`` when ``lb=False``.
        """
        # compute the roots of indicial equation
        indicialroots = self._indicial()
        reals = []
        compl = []
        for i in ordered(indicialroots.keys()):
            if i.is_real:
                reals.extend([i] * indicialroots[i])
            else:
                a, b = i.as_real_imag()
                compl.extend([(i, a, b)] * indicialroots[i])
        # sort the roots for a fixed ordering of solution
        compl.sort(key=lambda x : x[1])
        compl.sort(key=lambda x : x[2])
        reals.sort()
        # grouping the roots, roots differ by an integer are put in the same group.
        grp = []
        for i in reals:
            if len(grp) == 0:
                grp.append([i])
                continue
            for j in grp:
                if int_valued(j[0] - i):
                    j.append(i)
                    break
            else:
                grp.append([i])
        # True if none of the roots differ by an integer i.e.
        # each element in group have only one member
        independent = all(len(i) == 1 for i in grp)
        allpos = all(i >= 0 for i in reals)
        allint = all(int_valued(i) for i in reals)
        # if initial conditions are provided
        # then use them.
        if self.is_singularics() == True:
            rootstoconsider = []
            for i in ordered(self.y0.keys()):
                for j in ordered(indicialroots.keys()):
                    if equal_valued(j, i):
                        rootstoconsider.append(i)
        elif allpos and allint:
            rootstoconsider = [min(reals)]
        elif independent:
            rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
        elif not allint:
            rootstoconsider = [i for i in reals if not int(i) == i]
        elif not allpos:
            if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
                rootstoconsider = [min(reals)]
            else:
                posroots = [i for i in reals if i >= 0]
                rootstoconsider = [min(posroots)]
        n = Symbol('n', integer=True)
        dom = self.annihilator.parent.base.dom
        R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
        finalsol = []
        # each root gets its own constant-name prefix: C, D, E, ...
        char = ord('C')
        for p in rootstoconsider:
            # translate x^k Dx^i terms into shifts of u(n), offset by the
            # indicial root p (cf. the ordinary case in to_sequence)
            dict1 = {}
            for i, j in enumerate(self.annihilator.listofpoly):
                listofdmp = j.all_coeffs()
                degree = len(listofdmp) - 1
                for k in range(degree + 1):
                    coeff = listofdmp[degree - k]
                    if coeff == 0:
                        continue
                    if (i - k, k - i) in dict1:
                        dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
                    else:
                        dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
            sol = []
            keylist = [i[0] for i in dict1]
            lower = min(keylist)
            upper = max(keylist)
            degree = max(i[1] for i in dict1)
            degree2 = min(i[1] for i in dict1)
            smallest_n = lower + degree
            dummys = {}
            eqs = []
            unknowns = []
            for j in range(lower, upper + 1):
                if j in keylist:
                    temp = sum((v.subs(n, n - lower)
                                for k, v in dict1.items() if k[0] == j),
                               start=S.Zero)
                    sol.append(temp)
                else:
                    sol.append(S.Zero)
            # the recurrence relation
            sol = RecurrenceOperator(sol, R)
            # computing the initial conditions for recurrence
            order = sol.order
            all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
            all_roots = all_roots.keys()
            if all_roots:
                max_root = max(all_roots) + 1
                smallest_n = max(max_root, smallest_n)
            order += smallest_n
            u0 = []
            if self.is_singularics() == True:
                u0 = self.y0[p]
            elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
                y0 = _extend_y0(self, order + int(p))
                # u(n) = y^n(0)/factorial(n)
                if len(y0) > int(p):
                    u0 = [y0[i] / factorial(i) for i in range(int(p), len(y0))]
            # if sufficient conditions can't be computed, fall back to
            # equating series coefficients (symbolic unknowns if unsolved)
            if len(u0) < order:
                for i in range(degree2, degree):
                    eq = S.Zero
                    for j in dict1:
                        if i + j[0] < 0:
                            dummys[i + j[0]] = S.Zero
                        elif i + j[0] < len(u0):
                            dummys[i + j[0]] = u0[i + j[0]]
                        elif not i + j[0] in dummys:
                            letter = chr(char) + '_%s' %(i + j[0])
                            dummys[i + j[0]] = Symbol(letter)
                            unknowns.append(dummys[i + j[0]])
                        if j[1] <= i:
                            eq += dict1[j].subs(n, i) * dummys[i + j[0]]
                    eqs.append(eq)
                # solve the system of equations formed
                soleqs = solve(eqs, *unknowns)
                if isinstance(soleqs, dict):
                    for i in range(len(u0), order):
                        if i not in dummys:
                            letter = chr(char) + '_%s' %i
                            dummys[i] = Symbol(letter)
                        if dummys[i] in soleqs:
                            u0.append(soleqs[dummys[i]])
                        else:
                            u0.append(dummys[i])
                    if lb:
                        finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
                        continue
                    else:
                        finalsol.append((HolonomicSequence(sol, u0), p))
                        continue
                for i in range(len(u0), order):
                    if i not in dummys:
                        letter = chr(char) + '_%s' %i
                        dummys[i] = Symbol(letter)
                    s = False
                    for j in soleqs:
                        if dummys[i] in j:
                            u0.append(j[dummys[i]])
                            s = True
                    if not s:
                        u0.append(dummys[i])
            if lb:
                finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
            else:
                finalsol.append((HolonomicSequence(sol, u0), p))
            char += 1
        return finalsol
    def series(self, n=6, coefficient=False, order=True, _recur=None):
        r"""
        Finds the power series expansion of given holonomic function about :math:`x_0`.
        Explanation
        ===========
        A list of series might be returned if :math:`x_0` is a regular point with
        multiple roots of the indicial equation.
        Examples
        ========
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
        1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
        >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
        x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
        See Also
        ========
        HolonomicFunction.to_sequence
        """
        if _recur is None:
            recurrence = self.to_sequence()
        else:
            recurrence = _recur
        # to_sequence may return (R, n0) or (R, p, n0) tuples, or a list
        # of them; normalize to one recurrence R and the exponent p
        # (``constantpower``) of the prefactor x**p
        if isinstance(recurrence, tuple) and len(recurrence) == 2:
            recurrence = recurrence[0]
            constantpower = 0
        elif isinstance(recurrence, tuple) and len(recurrence) == 3:
            constantpower = recurrence[1]
            recurrence = recurrence[0]
        elif len(recurrence) == 1 and len(recurrence[0]) == 2:
            recurrence = recurrence[0][0]
            constantpower = 0
        elif len(recurrence) == 1 and len(recurrence[0]) == 3:
            constantpower = recurrence[0][1]
            recurrence = recurrence[0][0]
        else:
            # several recurrences (multiple indicial roots): one series each
            return [self.series(_recur=i) for i in recurrence]
        n = n - int(constantpower)
        l = len(recurrence.u0) - 1
        k = recurrence.recurrence.order
        x = self.x
        x0 = self.x0
        seq_dmp = recurrence.recurrence.listofpoly
        R = recurrence.recurrence.parent.base
        K = R.get_field()
        seq = [K.new(j.to_list()) for j in seq_dmp]
        # solve the recurrence for its highest shift term
        sub = [-seq[i] / seq[k] for i in range(k)]
        sol = list(recurrence.u0)
        if l + 1 < n:
            # use the initial conditions to find the next term
            for i in range(l + 1 - k, n - k):
                coeff = sum((DMFsubs(sub[j], i) * sol[i + j]
                             for j in range(k) if i + j >= 0), start=S.Zero)
                sol.append(coeff)
        if coefficient:
            return sol
        ser = sum((x**(i + constantpower) * j for i, j in enumerate(sol)),
                  start=S.Zero)
        if order:
            ser += Order(x**(n + int(constantpower)), x)
        if x0 != 0:
            # expansion about x0: series in powers of (x - x0)
            return ser.subs(x, x - x0)
        return ser
    def _indicial(self):
        """
        Computes roots of the Indicial equation.
        Returns a dict mapping each root to its multiplicity (the output
        of :func:`roots` on the indicial polynomial built below).
        """
        # work about the origin: shift the function if x0 != 0
        if self.x0 != 0:
            return self.shift_x(self.x0)._indicial()
        list_coeff = self.annihilator.listofpoly
        R = self.annihilator.parent.base
        x = self.x
        s = R.zero
        y = R.one
        def _pole_degree(poly):
            # multiplicity of the root x = 0 of poly (0 if poly(0) != 0)
            root_all = roots(R.to_sympy(poly), x, filter='Z')
            if 0 in root_all.keys():
                return root_all[0]
            else:
                return 0
        degree = max(j.degree() for j in list_coeff)
        # sentinel acting as "infinite" pole degree for the zero polynomial
        inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
        deg = lambda q: inf if q.is_zero else _pole_degree(q)
        # smallest value of (x-adic valuation of coeff_j) - j; picks out
        # the terms that contribute to the indicial polynomial
        b = min(deg(q) - j for j, q in enumerate(list_coeff))
        for i, j in enumerate(list_coeff):
            listofdmp = j.all_coeffs()
            degree = len(listofdmp) - 1
            if 0 <= i + b <= degree:
                s = s + listofdmp[degree - i - b] * y
            # y = x*(x - 1)*...*(x - i), the falling-factorial product
            y *= R.from_sympy(x - i)
        return roots(R.to_sympy(s), x)
    def evalf(self, points, method='RK4', h=0.05, derivatives=False):
        r"""
        Finds numerical value of a holonomic function using numerical methods.
        (RK4 by default). A set of points (real or complex) must be provided
        which will be the path for the numerical integration.
        Explanation
        ===========
        The path should be given as a list :math:`[x_1, x_2, \dots x_n]`. The numerical
        values will be computed at each point in this order
        :math:`x_1 \rightarrow x_2 \rightarrow x_3 \dots \rightarrow x_n`.
        Returns values of the function at :math:`x_1, x_2, \dots x_n` in a list.
        Examples
        ========
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        A straight line on the real axis from (0 to 1)
        >>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
        Runge-Kutta 4th order on e^x from 0.1 to 1.
        Exact solution at 1 is 2.71828182845905
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
        [1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
        1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
        2.45960141378007, 2.71827974413517]
        Euler's method for the same
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
        [1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
        2.357947691, 2.5937424601]
        One can also observe that the value obtained using Runge-Kutta 4th order
        is much more accurate than Euler's method.
        """
        from sympy.holonomic.numerical import _evalf
        # lp is True when a single end point was given instead of a mesh;
        # a mesh of step ``h`` from x0 to that point is then generated and
        # only the final value is returned.
        lp = False
        # if a point `b` is given instead of a mesh
        if not hasattr(points, "__iter__"):
            lp = True
            b = S(points)
            if self.x0 == b:
                return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
            if not b.is_Number:
                raise NotImplementedError
            a = self.x0
            if a > b:
                # integrate backwards with a negative step
                h = -h
            n = int((b - a) / h)
            points = [a + h]
            for i in range(n - 1):
                points.append(points[-1] + h)
        # refuse to integrate through a singularity of the ODE, i.e. a
        # root of the leading coefficient of the annihilator
        for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
            if i == self.x0 or i in points:
                raise SingularityError(self, i)
        if lp:
            return _evalf(self, points, method=method, derivatives=derivatives)[-1]
        return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
Changes only the variable of Holonomic Function, for internal
purposes. For composition use HolonomicFunction.composition()
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = [R(j.to_list()) for j in self.annihilator.listofpoly]
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
    def to_hyper(self, as_list=False, _recur=None):
        r"""
        Returns a hypergeometric function (or linear combination of them)
        representing the given holonomic function.
        Explanation
        ===========
        Returns an answer of the form:
        `a_1 \cdot x^{b_1} \cdot{hyper()} + a_2 \cdot x^{b_2} \cdot{hyper()} \dots`
        This is very useful as one can now use ``hyperexpand`` to find the
        symbolic expressions/functions.
        Examples
        ========
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy import ZZ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
        >>> # sin(x)
        >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
        x*hyper((), (3/2,), -x**2/4)
        >>> # exp(x)
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
        hyper((), (), x)
        See Also
        ========
        from_hyper, from_meijerg
        """
        if _recur is None:
            recurrence = self.to_sequence()
        else:
            recurrence = _recur
        # to_sequence may return (R, n0) or (R, p, n0) tuples, or a list
        # of them; normalize to one recurrence, the prefactor exponent
        # ``constantpower`` and the validity bound ``smallest_n``
        if isinstance(recurrence, tuple) and len(recurrence) == 2:
            smallest_n = recurrence[1]
            recurrence = recurrence[0]
            constantpower = 0
        elif isinstance(recurrence, tuple) and len(recurrence) == 3:
            smallest_n = recurrence[2]
            constantpower = recurrence[1]
            recurrence = recurrence[0]
        elif len(recurrence) == 1 and len(recurrence[0]) == 2:
            smallest_n = recurrence[0][1]
            recurrence = recurrence[0][0]
            constantpower = 0
        elif len(recurrence) == 1 and len(recurrence[0]) == 3:
            smallest_n = recurrence[0][2]
            constantpower = recurrence[0][1]
            recurrence = recurrence[0][0]
        else:
            # several recurrences: sum the representation of each
            sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
            for i in recurrence[1:]:
                sol += self.to_hyper(as_list=as_list, _recur=i)
            return sol
        u0 = recurrence.u0
        r = recurrence.recurrence
        x = self.x
        x0 = self.x0
        # order of the recurrence relation
        m = r.order
        # when no recurrence exists, and the power series have finite terms
        if m == 0:
            nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
            sol = S.Zero
            for j, i in enumerate(nonzeroterms):
                if i < 0 or not int_valued(i):
                    continue
                i = int(i)
                if i < len(u0):
                    if isinstance(u0[i], (PolyElement, FracElement)):
                        u0[i] = u0[i].as_expr()
                    sol += u0[i] * x**i
                else:
                    # coefficient unknown: leave it symbolic
                    sol += Symbol('C_%s' %j) * x**i
            if isinstance(sol, (PolyElement, FracElement)):
                sol = sol.as_expr() * x**constantpower
            else:
                sol = sol * x**constantpower
            if as_list:
                if x0 != 0:
                    return [(sol.subs(x, x - x0), )]
                return [(sol, )]
            if x0 != 0:
                return sol.subs(x, x - x0)
            return sol
        if smallest_n + m > len(u0):
            raise NotImplementedError("Can't compute sufficient Initial Conditions")
        # check if the recurrence represents a hypergeometric series:
        # only a two-term recurrence (first and last coefficient) qualifies
        if any(i != r.parent.base.zero for i in r.listofpoly[1:-1]):
            raise NotHyperSeriesError(self, self.x0)
        a = r.listofpoly[0]
        b = r.listofpoly[-1]
        # the constant multiple of argument of hypergeometric function
        if isinstance(a.LC(), (PolyElement, FracElement)):
            c = - (S(a.LC().as_expr()) * m**(a.degree())) / (S(b.LC().as_expr()) * m**(b.degree()))
        else:
            c = - (S(a.LC()) * m**(a.degree())) / (S(b.LC()) * m**(b.degree()))
        sol = 0
        arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
        arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
        # iterate through the initial conditions to find
        # the hypergeometric representation of the given
        # function.
        # The answer will be a linear combination
        # of different hypergeometric series which satisfies
        # the recurrence.
        if as_list:
            listofsol = []
        for i in range(smallest_n + m):
            # if the recurrence relation doesn't hold for `n = i`,
            # then a Hypergeometric representation doesn't exist.
            # add the algebraic term a * x**i to the solution,
            # where a is u0[i]
            if i < smallest_n:
                if as_list:
                    listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
                else:
                    sol += S(u0[i]) * x**i
                continue
            # if the coefficient u0[i] is zero, then the
            # independent hypergeomtric series starting with
            # x**i is not a part of the answer.
            if S(u0[i]) == 0:
                continue
            ap = []
            bq = []
            # substitute m * n + i for n
            for k in ordered(arg1.keys()):
                ap.extend([nsimplify((i - k) / m)] * arg1[k])
            for k in ordered(arg2.keys()):
                bq.extend([nsimplify((i - k) / m)] * arg2[k])
            # convention of (k + 1) in the denominator
            if 1 in bq:
                bq.remove(1)
            else:
                ap.append(1)
            if as_list:
                listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
            else:
                sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
        if as_list:
            return listofsol
        sol = sol * x**constantpower
        if x0 != 0:
            return sol.subs(x, x - x0)
        return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy import ZZ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to ``b`` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)*Dx**2, x, 1, [sin(1), cos(1)])
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)*Dx, x, 2, [exp(2)])
"""
symbolic = True
if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S.Zero
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
    r"""
    Converts a hypergeometric function to holonomic.
    ``func`` is the Hypergeometric Function and ``x0`` is the point at
    which initial conditions are required.  When ``evalf`` is True and
    the function cannot be simplified symbolically, the conditions are
    computed numerically.
    Examples
    ========
    >>> from sympy.holonomic.holonomic import from_hyper
    >>> from sympy import symbols, hyper, S
    >>> x = symbols('x')
    >>> from_hyper(hyper([], [S(3)/2], x**2/4))
    HolonomicFunction((-x) + (2)*Dx + (x)*Dx**2, x, 1, [sinh(1), -sinh(1) + cosh(1)])
    """
    a = func.ap
    b = func.bq
    z = func.args[2]
    x = z.atoms(Symbol).pop()
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    # generalized hypergeometric differential equation
    xDx = x*Dx
    r1 = 1
    for ai in a:  # XXX gives sympify error if Mul is used with list of all factors
        r1 *= xDx + ai
    xDx_1 = xDx - 1
    # r2 = Mul(*([Dx] + [xDx_1 + bi for bi in b]))  # XXX gives sympify error
    r2 = Dx
    for bi in b:
        r2 *= xDx_1 + bi
    sol = r1 - r2
    simp = hyperexpand(func)
    if simp in (Infinity, NegativeInfinity):
        return HolonomicFunction(sol, x).composition(z)
    if isinstance(simp, hyper):
        # the function could not be simplified: start at x0 = 1 and use
        # evalf (if requested) to compute the conditions numerically
        x0 = 1
        y0 = _find_conditions(simp, x, x0, sol.order, evalf, use_limit=False)
        while not y0:
            x0 += 1
            y0 = _find_conditions(simp, x, x0, sol.order, evalf, use_limit=False)
        return HolonomicFunction(sol, x).composition(z, x0, y0)
    # the function is known symbolically; if values don't exist at x0,
    # try successive integer points x0 + 1, x0 + 2, ...
    # (the original code duplicated this branch behind a redundant
    # isinstance check, leaving a final return unreachable)
    y0 = _find_conditions(simp, x, x0, sol.order, use_limit=False)
    while not y0:
        x0 += 1
        y0 = _find_conditions(simp, x, x0, sol.order, use_limit=False)
    return HolonomicFunction(sol, x).composition(z, x0, y0)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
    """
    Converts a Meijer G-function to Holonomic.
    ``func`` is the G-Function and ``x0`` is the point at
    which initial conditions are required.  Set ``initcond=False`` to
    skip computing initial conditions; ``evalf=True`` computes them
    numerically when the function cannot be simplified symbolically.
    Examples
    ========
    >>> from sympy.holonomic.holonomic import from_meijerg
    >>> from sympy import symbols, meijerg, S
    >>> x = symbols('x')
    >>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
    HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1/sqrt(pi)])
    """
    a = func.ap
    b = func.bq
    n = len(func.an)
    m = len(func.bm)
    p = len(a)
    z = func.args[2]
    x = z.atoms(Symbol).pop()
    R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
    # compute the differential equation satisfied by the
    # Meijer G-function.
    xDx = x*Dx
    xDx1 = xDx + 1
    r1 = x*(-1)**(m + n - p)
    for ai in a:  # XXX gives sympify error if args given in list
        r1 *= xDx1 - ai
    # r2 = Mul(*[xDx - bi for bi in b])  # gives sympify error
    r2 = 1
    for bi in b:
        r2 *= xDx - bi
    sol = r1 - r2
    if not initcond:
        return HolonomicFunction(sol, x).composition(z)
    simp = hyperexpand(func)
    if simp in (Infinity, NegativeInfinity):
        return HolonomicFunction(sol, x).composition(z)
    # computing initial conditions
    if isinstance(simp, meijerg):
        # could not be simplified: start at x0 = 1 and (optionally) evalf
        x0 = 1
        y0 = _find_conditions(simp, x, x0, sol.order, evalf, use_limit=False)
        while not y0:
            x0 += 1
            y0 = _find_conditions(simp, x, x0, sol.order, evalf, use_limit=False)
        return HolonomicFunction(sol, x).composition(z, x0, y0)
    # known symbolically; if values don't exist at x0 try x0 + 1, x0 + 2, ...
    # (the original code duplicated this branch behind a redundant
    # isinstance check, leaving a final return unreachable)
    y0 = _find_conditions(simp, x, x0, sol.order, use_limit=False)
    while not y0:
        x0 += 1
        y0 = _find_conditions(simp, x, x0, sol.order, use_limit=False)
    return HolonomicFunction(sol, x).composition(z, x0, y0)
x_1 = Dummy('x_1')  # placeholder argument used when matching against the lookup table
_lookup_table = None  # cached table mapping _mytype keys to holonomic representations
domain_for_table = None  # ground domain the cached table was built for
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
Converts a function or an expression to a holonomic function.
Parameters
==========
func:
The expression to be converted.
x:
variable for the function.
x0:
point at which initial condition must be computed.
y0:
One can optionally provide initial condition if the method
is not able to do it automatically.
lenics:
Number of terms in the initial condition. By default it is
equal to the order of the annihilator.
domain:
Ground domain for the polynomials in ``x`` appearing as coefficients
in the annihilator.
initcond:
Set it false if you do not want the initial conditions to be computed.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1])
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)*Dx, x, 0, [1])
See Also
========
sympy.integrals.meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
x= syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table or domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
# iterate through the expression recursively
args = func.args
f = func.func
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
Normalize a given annihilator
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S.One)
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.to_list()))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.to_list())
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).to_list())
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.to_list())
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).to_list())
return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly, K):
"""
Let a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0
where a0, a1,... are polynomials or rational functions. The function
returns b0, b1, b2... such that the differential equation
b0(x)y(x) + b1(x)y'(x) +... = 0 is formed after differentiating the
former equation.
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0], K))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j, K) + listofpoly[i])
sol.append(listofpoly[a])
return sol
def _hyper_to_meijerg(func):
"""
Converts a `hyper` to meijerg.
"""
ap = func.ap
bq = func.bq
if any(i <= 0 and int(i) == i for i in ap):
return hyperexpand(func)
z = func.args[2]
# parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S.Zero, )
bmq = (1 - i for i in bq)
k = S.One
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
def _add_lists(list1, list2):
"""Takes polynomial sequences of two annihilators a and b and returns
the list of polynomials of sum of a and b.
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for j in annihilator.listofpoly:
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.to_list()))
if len(y0) < a or n <= len(y0):
return y0
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
y1 = y0[:min(len(y0), a)]
for _ in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
if not getattr(r, 'is_finite', True):
return y0
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red, K)
return y0 + y1[len(y0):]
def DMFdiff(frac, K):
# differentiate a DMF object represented as p/q
if not isinstance(frac, DMF):
return frac.diff()
p = K.numer(frac)
q = K.denom(frac)
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.to_list(), sol_denom.to_list()))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 in DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S.Zero
sol_q = S.Zero
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""
Converts polynomials, rationals and algebraic functions to holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).to_list()
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
coeff = list(reversed(rep))[i:]
indicial = i
break
for i, j in enumerate(coeff):
if isinstance(j, (PolyElement, FracElement)):
coeff[i] = j.as_expr()
y0 = {indicial: S(coeff)}
elif israt:
p, q = func.as_numer_denom()
# differential equation satisfied by rational
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
sol = n * (x / m) * Dx - 1
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular and \
(lenics is None or lenics <= 1):
rep = R.from_sympy(basepoly).to_list()
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
if isinstance(j, (PolyElement, FracElement)):
j = j.as_expr()
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
if isinstance(coeff, (PolyElement, FracElement)):
coeff = coeff.as_expr()
y0 = {indicial: S([coeff])}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] == x else S.Zero
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] == x else S.Zero
r = s / b
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=()):
"""
Adds a formula in the dictionary
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order, evalf=False, use_limit=True):
y0 = []
for _ in range(order):
val = func.subs(x, x0)
if evalf:
val = val.evalf()
if use_limit and isinstance(val, NaN):
val = limit(func, x, x0)
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
| HolonomicFunction |
python | simonw__datasette | datasette/utils/__init__.py | {
"start": 675,
"end": 6801
} | class ____:
"""Paginated results from allowed_resources query."""
resources: List["Resource"]
next: str | None # Keyset token for next page (None if no more results)
_datasette: typing.Any = dataclasses.field(default=None, repr=False)
_action: str = dataclasses.field(default=None, repr=False)
_actor: typing.Any = dataclasses.field(default=None, repr=False)
_parent: str | None = dataclasses.field(default=None, repr=False)
_include_is_private: bool = dataclasses.field(default=False, repr=False)
_include_reasons: bool = dataclasses.field(default=False, repr=False)
_limit: int = dataclasses.field(default=100, repr=False)
async def all(self):
"""
Async generator that yields all resources across all pages.
Automatically handles pagination under the hood. This is useful when you need
to iterate through all results without manually managing pagination tokens.
Yields:
Resource objects one at a time
Example:
page = await datasette.allowed_resources("view-table", actor)
async for table in page.all():
print(f"{table.parent}/{table.child}")
"""
# Yield all resources from current page
for resource in self.resources:
yield resource
# Continue fetching subsequent pages if there are more
next_token = self.next
while next_token:
page = await self._datasette.allowed_resources(
self._action,
self._actor,
parent=self._parent,
include_is_private=self._include_is_private,
include_reasons=self._include_reasons,
limit=self._limit,
next=next_token,
)
for resource in page.resources:
yield resource
next_token = page.next
# From https://www.sqlite.org/lang_keywords.html
reserved_words = set(
(
"abort action add after all alter analyze and as asc attach autoincrement "
"before begin between by cascade case cast check collate column commit "
"conflict constraint create cross current_date current_time "
"current_timestamp database default deferrable deferred delete desc detach "
"distinct drop each else end escape except exclusive exists explain fail "
"for foreign from full glob group having if ignore immediate in index "
"indexed initially inner insert instead intersect into is isnull join key "
"left like limit match natural no not notnull null of offset on or order "
"outer plan pragma primary query raise recursive references regexp reindex "
"release rename replace restrict right rollback row savepoint select set "
"table temp temporary then to transaction trigger union unique update using "
"vacuum values view virtual when where with without"
).split()
)
APT_GET_DOCKERFILE_EXTRAS = r"""
RUN apt-get update && \
apt-get install -y {} && \
rm -rf /var/lib/apt/lists/*
"""
# Can replace with sqlite-utils when I add that dependency
SPATIALITE_PATHS = (
"/usr/lib/x86_64-linux-gnu/mod_spatialite.so",
"/usr/local/lib/mod_spatialite.dylib",
"/usr/local/lib/mod_spatialite.so",
"/opt/homebrew/lib/mod_spatialite.dylib",
)
# Used to display /-/versions.json SpatiaLite information
SPATIALITE_FUNCTIONS = (
"spatialite_version",
"spatialite_target_cpu",
"check_strict_sql_quoting",
"freexl_version",
"proj_version",
"geos_version",
"rttopo_version",
"libxml2_version",
"HasIconv",
"HasMathSQL",
"HasGeoCallbacks",
"HasProj",
"HasProj6",
"HasGeos",
"HasGeosAdvanced",
"HasGeosTrunk",
"HasGeosReentrant",
"HasGeosOnlyReentrant",
"HasMiniZip",
"HasRtTopo",
"HasLibXML2",
"HasEpsg",
"HasFreeXL",
"HasGeoPackage",
"HasGCP",
"HasTopology",
"HasKNN",
"HasRouting",
)
# Length of hash subset used in hashed URLs:
HASH_LENGTH = 7
# Can replace this with Column from sqlite_utils when I add that dependency
Column = namedtuple(
"Column", ("cid", "name", "type", "notnull", "default_value", "is_pk", "hidden")
)
functions_marked_as_documented = []
def documented(fn):
functions_marked_as_documented.append(fn)
return fn
@documented
async def await_me_maybe(value: typing.Any) -> typing.Any:
"If value is callable, call it. If awaitable, await it. Otherwise return it."
if callable(value):
value = value()
if asyncio.iscoroutine(value):
value = await value
return value
def urlsafe_components(token):
"""Splits token on commas and tilde-decodes each component"""
return [tilde_decode(b) for b in token.split(",")]
def path_from_row_pks(row, pks, use_rowid, quote=True):
"""Generate an optionally tilde-encoded unique identifier
for a row from its primary keys."""
if use_rowid:
bits = [row["rowid"]]
else:
bits = [
row[pk]["value"] if isinstance(row[pk], dict) else row[pk] for pk in pks
]
if quote:
bits = [tilde_encode(str(bit)) for bit in bits]
else:
bits = [str(bit) for bit in bits]
return ",".join(bits)
def compound_keys_after_sql(pks, start_index=0):
# Implementation of keyset pagination
# See https://github.com/simonw/datasette/issues/190
# For pk1/pk2/pk3 returns:
#
# ([pk1] > :p0)
# or
# ([pk1] = :p0 and [pk2] > :p1)
# or
# ([pk1] = :p0 and [pk2] = :p1 and [pk3] > :p2)
or_clauses = []
pks_left = pks[:]
while pks_left:
and_clauses = []
last = pks_left[-1]
rest = pks_left[:-1]
and_clauses = [
f"{escape_sqlite(pk)} = :p{i + start_index}" for i, pk in enumerate(rest)
]
and_clauses.append(f"{escape_sqlite(last)} > :p{len(rest) + start_index}")
or_clauses.append(f"({' and '.join(and_clauses)})")
pks_left.pop()
or_clauses.reverse()
return "({})".format("\n or\n".join(or_clauses))
| PaginatedResources |
python | openai__openai-python | src/openai/types/responses/response_input_text.py | {
"start": 194,
"end": 375
} | class ____(BaseModel):
text: str
"""The text input to the model."""
type: Literal["input_text"]
"""The type of the input item. Always `input_text`."""
| ResponseInputText |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/daemon_tests/test_backfill.py | {
"start": 11929,
"end": 151221
} | class ____(dg.Config):
name: str
@dg.asset(partitions_def=asset_job_partitions, backfill_policy=BackfillPolicy.single_run())
def bp_single_run(context: AssetExecutionContext):
return {k: 1 for k in context.partition_keys}
@dg.asset(partitions_def=asset_job_partitions, backfill_policy=BackfillPolicy.single_run())
def bp_single_run_config(context: AssetExecutionContext, config: BpSingleRunConfig):
context.log.info(config.name)
return {k: 1 for k in context.partition_keys}
@dg.asset(partitions_def=asset_job_partitions, backfill_policy=BackfillPolicy.multi_run(2))
def bp_multi_run(context: AssetExecutionContext):
return {k: 1 for k in context.partition_keys}
@dg.asset(partitions_def=asset_job_partitions)
def bp_none(context: AssetExecutionContext):
return 1
old_dynamic_partitions_def = dg.DynamicPartitionsDefinition(
partition_fn=lambda _: ["a", "b", "c", "d"]
)
@dg.job(partitions_def=old_dynamic_partitions_def)
def old_dynamic_partitions_job():
always_succeed()
@dg.repository
def the_repo():
return [
the_job,
conditional_failure_job,
partial_job,
config_job,
always_succeed_job,
parallel_failure_job,
old_dynamic_partitions_job,
# the lineage graph defined with these assets is such that: foo -> a1 -> bar -> b1
# this requires ab1 to be split into two separate asset definitions using the automatic
# subsetting capabilities. ab2 is defines similarly, so in total 4 copies of the "reusable"
# op will exist in the full plan, whereas only a single copy will be needed for a subset
# plan which only materializes foo -> a1 -> bar
foo,
bar,
ab1,
ab2,
dg.define_asset_job(
"twisted_asset_mess", selection="*b2", partitions_def=static_partitions
),
always_fails,
pass_on_retry,
# baz is a configurable asset which has no dependencies
baz,
my_multi_asset,
asset_a,
asset_b,
asset_c,
asset_d,
daily_1,
daily_2,
asset_e,
asset_f,
asset_g,
multi_partitioned_asset_with_single_run_bp,
multi_partitioned_asset,
fails_once_asset_a,
downstream_of_fails_once_asset_b,
downstream_of_fails_once_asset_c,
asset_with_single_run_backfill_policy,
asset_with_multi_run_backfill_policy,
complex_asset_with_backfill_policy,
bp_single_run,
bp_single_run_config,
bp_multi_run,
bp_none,
dg.define_asset_job(
"bp_single_run_asset_job",
selection=[bp_single_run_config, bp_single_run],
tags={"alpha": "beta"},
config={"ops": {"bp_single_run_config": {"config": {"name": "harry"}}}},
),
dg.define_asset_job(
"bp_multi_run_asset_job",
selection=[bp_multi_run],
tags={"alpha": "beta"},
),
dg.define_asset_job(
"bp_none_asset_job",
selection=[bp_none],
),
dg.define_asset_job(
"standard_partitioned_asset_job",
selection=AssetSelection.assets("foo", "a1", "bar"),
),
dg.define_asset_job(
"multi_asset_job",
selection=[my_multi_asset],
),
]
def wait_for_all_runs_to_start(instance, timeout=10):
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
pending_states = [
DagsterRunStatus.NOT_STARTED,
DagsterRunStatus.STARTING,
DagsterRunStatus.STARTED,
]
pending_runs = [run for run in instance.get_runs() if run.status in pending_states]
if len(pending_runs) == 0:
break
def wait_for_all_runs_to_finish(instance, timeout=10):
start_time = time.time()
FINISHED_STATES = [
DagsterRunStatus.SUCCESS,
DagsterRunStatus.FAILURE,
DagsterRunStatus.CANCELED,
]
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
not_finished_runs = [
run for run in instance.get_runs() if run.status not in FINISHED_STATES
]
if len(not_finished_runs) == 0:
break
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_backfill(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
parallel: bool,
):
partition_set = remote_repo.get_partition_set("the_job_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
assert instance.get_runs_count() == 0
if parallel:
backfill_daemon_futures = {}
list(
execute_backfill_iteration(
workspace_context,
get_default_daemon_logger("BackfillDaemon"),
threadpool_executor=ThreadPoolExecutor(2),
backfill_futures=backfill_daemon_futures,
)
)
wait_for_futures(backfill_daemon_futures)
else:
list(
execute_backfill_iteration(
workspace_context, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "simple"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert two.tags[BACKFILL_ID_TAG] == "simple"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert three.tags[BACKFILL_ID_TAG] == "simple"
assert three.tags[PARTITION_NAME_TAG] == "three"
@pytest.mark.parametrize("parallel", [True, False])
def test_two_backfills_at_the_same_time(
tmp_path: Path,
parallel: bool,
):
# In order to avoid deadlock, we need to ensure that the instance we
# are using will launch runs in separate subprocesses rather than in
# the same in-memory process. This is akin to the context created in
# https://github.com/dagster-io/dagster/blob/a116c44/python_modules/dagster/dagster_tests/scheduler_tests/conftest.py#L53-L71
with dg.instance_for_test(
overrides={
"event_log_storage": {
"module": "dagster._core.storage.event_log",
"class": "ConsolidatedSqliteEventLogStorage",
"config": {"base_dir": str(tmp_path)},
},
"run_retries": {"enabled": True},
}
) as instance:
with create_test_daemon_workspace_context(
workspace_load_target=ModuleTarget(
module_name="dagster_tests.daemon_tests.test_backfill",
attribute="the_repo",
working_directory=os.path.join(os.path.dirname(__file__), "..", ".."),
location_name="test_location",
),
instance=instance,
) as workspace_context:
remote_repo = cast(
"CodeLocation",
next(
iter(
workspace_context.create_request_context()
.get_code_location_entries()
.values()
)
).code_location,
).get_repository("the_repo")
first_partition_set = remote_repo.get_partition_set("the_job_partition_set")
second_partition_keys = my_config.partitions_def.get_partition_keys()
second_partition_set = remote_repo.get_partition_set(
"comp_always_succeed_partition_set"
)
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=first_partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
instance.add_backfill(
PartitionBackfill(
backfill_id="partition_schedule_from_job",
partition_set_origin=second_partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=second_partition_keys[:3],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
assert instance.get_runs_count() == 0
if parallel:
threadpool_executor = ThreadPoolExecutor(4)
backfill_daemon_futures = {}
list(
execute_backfill_iteration(
workspace_context,
get_default_daemon_logger("BackfillDaemon"),
threadpool_executor=threadpool_executor,
backfill_futures=backfill_daemon_futures,
)
)
wait_for_futures(backfill_daemon_futures)
else:
list(
execute_backfill_iteration(
workspace_context, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 6
runs = list(instance.get_runs())
backfill_ids = sorted(run.tags[BACKFILL_ID_TAG] for run in runs)
partition_names = {run.tags[PARTITION_NAME_TAG] for run in runs}
assert backfill_ids == ["partition_schedule_from_job"] * 3 + ["simple"] * 3
assert partition_names == {"one", "two", "three", *second_partition_keys[:3]}
@pytest.mark.parametrize("parallel", [True, False])
def test_failure_backfill(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
parallel: bool,
):
output_file = _failure_flag_file()
partition_set = remote_repo.get_partition_set("conditional_failure_job_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="shouldfail",
partition_set_origin=partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
assert instance.get_runs_count() == 0
try:
touch_file(output_file)
if parallel:
backfill_daemon_futures = {}
list(
execute_backfill_iteration(
workspace_context,
get_default_daemon_logger("BackfillDaemon"),
threadpool_executor=ThreadPoolExecutor(2),
backfill_futures=backfill_daemon_futures,
)
)
wait_for_futures(backfill_daemon_futures)
else:
list(
execute_backfill_iteration(
workspace_context, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
finally:
os.remove(output_file)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "shouldfail"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == DagsterRunStatus.FAILURE
assert step_succeeded(instance, one, "always_succeed")
assert step_failed(instance, one, "conditionally_fail")
assert step_did_not_run(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "shouldfail"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == DagsterRunStatus.FAILURE
assert step_succeeded(instance, two, "always_succeed")
assert step_failed(instance, two, "conditionally_fail")
assert step_did_not_run(instance, two, "after_failure")
assert three.tags[BACKFILL_ID_TAG] == "shouldfail"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == DagsterRunStatus.FAILURE
assert step_succeeded(instance, three, "always_succeed")
assert step_failed(instance, three, "conditionally_fail")
assert step_did_not_run(instance, three, "after_failure")
instance.add_backfill(
PartitionBackfill(
backfill_id="fromfailure",
partition_set_origin=partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=True,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
assert not os.path.isfile(_failure_flag_file())
if parallel:
backfill_daemon_futures = {}
list(
execute_backfill_iteration(
workspace_context,
get_default_daemon_logger("BackfillDaemon"),
threadpool_executor=ThreadPoolExecutor(2),
backfill_futures=backfill_daemon_futures,
)
)
wait_for_futures(backfill_daemon_futures)
else:
list(
execute_backfill_iteration(
workspace_context, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 6
from_failure_filter = dg.RunsFilter(tags={BACKFILL_ID_TAG: "fromfailure"})
assert instance.get_runs_count(filters=from_failure_filter) == 3
runs = instance.get_runs(filters=from_failure_filter)
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "fromfailure"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == DagsterRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "fromfailure"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == DagsterRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
assert three.tags[BACKFILL_ID_TAG] == "fromfailure"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == DagsterRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
def test_job_backfill_status(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
):
partition_set = remote_repo.get_partition_set("the_job_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
)
assert instance.get_runs_count() == 0
# seed an in progress run so that this run won't get launched by the backfill daemon and will
# remain in the in progress state
fake_run = create_run_for_test(
instance=instance,
status=DagsterRunStatus.STARTED,
tags={
**DagsterRun.tags_for_backfill_id("simple"),
PARTITION_NAME_TAG: "one",
},
)
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
assert instance.get_runs_count() == 3
backfill = instance.get_backfill("simple")
assert backfill
assert backfill.status == BulkActionStatus.REQUESTED
# manually update the run to be in a finished state, backfill should be marked complete on next iteration
instance.delete_run(fake_run.run_id)
create_run_for_test(
instance=instance,
status=DagsterRunStatus.SUCCESS,
tags={
**DagsterRun.tags_for_backfill_id("simple"),
PARTITION_NAME_TAG: "one",
},
)
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
assert instance.get_runs_count() == 3
backfill = instance.get_backfill("simple")
assert backfill
assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
assert backfill.backfill_end_timestamp is not None
@pytest.mark.skipif(IS_WINDOWS, reason="flaky in windows")
def test_partial_backfill(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A backfill with ``reexecution_steps`` re-runs only the named steps.

    First launches a full backfill over three partitions (all steps execute),
    then a second backfill over the same partitions restricted to ``step_one``
    and verifies the other steps did not run.
    """
    partition_set = remote_repo.get_partition_set("partial_job_partition_set")
    # create full runs, where every step is executed
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="full",
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["one", "two", "three"],
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    assert instance.get_runs_count() == 0
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    wait_for_all_runs_to_start(instance)
    assert instance.get_runs_count() == 3
    runs = instance.get_runs()
    # get_runs returns newest-first, so unpack newest ("three") down to oldest ("one")
    three, two, one = runs
    assert one.tags[BACKFILL_ID_TAG] == "full"
    assert one.tags[PARTITION_NAME_TAG] == "one"
    assert one.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, one, "step_one")
    assert step_succeeded(instance, one, "step_two")
    assert step_succeeded(instance, one, "step_three")
    assert two.tags[BACKFILL_ID_TAG] == "full"
    assert two.tags[PARTITION_NAME_TAG] == "two"
    assert two.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, two, "step_one")
    assert step_succeeded(instance, two, "step_two")
    assert step_succeeded(instance, two, "step_three")
    assert three.tags[BACKFILL_ID_TAG] == "full"
    assert three.tags[PARTITION_NAME_TAG] == "three"
    assert three.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, three, "step_one")
    assert step_succeeded(instance, three, "step_two")
    assert step_succeeded(instance, three, "step_three")
    # delete one of the runs, the partial reexecution should still succeed because the steps
    # can be executed independently, require no input/output config
    instance.delete_run(one.run_id)
    assert instance.get_runs_count() == 2
    # create partial runs
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="partial",
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["one", "two", "three"],
            from_failure=False,
            reexecution_steps=["step_one"],
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    wait_for_all_runs_to_start(instance)
    assert instance.get_runs_count() == 5
    partial_filter = dg.RunsFilter(tags={BACKFILL_ID_TAG: "partial"})
    assert instance.get_runs_count(filters=partial_filter) == 3
    runs = instance.get_runs(filters=partial_filter)
    three, two, one = runs
    # only step_one ran in each partial re-execution
    assert one.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, one, "step_one")
    assert step_did_not_run(instance, one, "step_two")
    assert step_did_not_run(instance, one, "step_three")
    assert two.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, two, "step_one")
    assert step_did_not_run(instance, two, "step_two")
    assert step_did_not_run(instance, two, "step_three")
    assert three.status == DagsterRunStatus.SUCCESS
    assert step_succeeded(instance, three, "step_one")
    assert step_did_not_run(instance, three, "step_two")
    assert step_did_not_run(instance, three, "step_three")
def test_large_backfill(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A simple three-partition job backfill launches exactly one run per partition."""
    origin = remote_repo.get_partition_set("config_job_partition_set").get_remote_origin()
    backfill = PartitionBackfill(
        backfill_id="simple",
        partition_set_origin=origin,
        status=BulkActionStatus.REQUESTED,
        partition_names=["one", "two", "three"],
        from_failure=False,
        reexecution_steps=None,
        tags=None,
        backfill_timestamp=get_current_timestamp(),
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    daemon_logger = get_default_daemon_logger("BackfillDaemon")
    list(execute_backfill_iteration(workspace_context, daemon_logger))
    assert instance.get_runs_count() == 3
def test_backfill_is_processed_only_once(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A backfill already pending in the daemon's threadpool must not be submitted
    again on a subsequent iteration; the stored future is reused instead.
    """
    backfill_id = "simple"
    partition_set = remote_repo.get_partition_set("config_job_partition_set")
    instance.add_backfill(
        PartitionBackfill(
            backfill_id=backfill_id,
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["one", "two", "three"],
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    assert instance.get_runs_count() == 0
    threadpool_executor = ThreadPoolExecutor(2)
    backfill_daemon_futures = {}
    list(
        execute_backfill_iteration(
            workspace_context,
            get_default_daemon_logger("BackfillDaemon"),
            threadpool_executor=threadpool_executor,
            backfill_futures=backfill_daemon_futures,
        )
    )
    assert instance.get_runs_count() == 0
    future = backfill_daemon_futures[backfill_id]
    # With submit patched to raise, this iteration proves the daemon does not
    # re-enqueue a backfill whose future is already being tracked.
    with mock.patch.object(
        threadpool_executor, "submit", side_effect=AssertionError("Should not be called")
    ):
        list(
            execute_backfill_iteration(
                workspace_context,
                get_default_daemon_logger("BackfillDaemon"),
                threadpool_executor=threadpool_executor,
                backfill_futures=backfill_daemon_futures,
            )
        )
    assert instance.get_runs_count() == 0
    # the stored future object is unchanged -- no second submission happened
    assert backfill_daemon_futures[backfill_id] is future
    # draining the future lets the backfill actually execute and launch its runs
    wait_for_futures(backfill_daemon_futures)
    assert instance.get_runs_count() == 3
def test_unloadable_backfill(instance, workspace_context):
    """A backfill whose partition set origin cannot be loaded launches no runs,
    records a serializable error, and transitions FAILING -> FAILED.
    """
    daemon_logger = get_default_daemon_logger("BackfillDaemon")
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="simple",
            partition_set_origin=_unloadable_partition_set_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["one", "two", "three"],
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    assert instance.get_runs_count() == 0
    # First iteration: the origin fails to load, so the backfill starts failing.
    list(execute_backfill_iteration(workspace_context, daemon_logger))
    assert instance.get_runs_count() == 0
    failing = instance.get_backfill("simple")
    assert failing.status == BulkActionStatus.FAILING
    assert isinstance(failing.error, SerializableErrorInfo)
    # One more iteration to ensure the launched runs are canceled, after which
    # the backfill is finalized as failed.
    list(execute_backfill_iteration(workspace_context, daemon_logger))
    assert instance.get_backfill("simple").status == BulkActionStatus.FAILED
def test_unloadable_asset_backfill(instance, workspace_context):
    """An asset backfill targeting an asset key absent from the asset graph fails
    on the first iteration (no retries) without launching runs, then is marked
    FAILED with an end timestamp on the following iteration.
    """
    backfill_id = "simple_fan_out_backfill"
    asset_backfill_data = AssetBackfillData.empty(
        target_subset=AssetGraphSubset(
            partitions_subsets_by_asset_key={
                dg.AssetKey(["does_not_exist"]): my_config.partitions_def.empty_subset()
            }
        ),
        backfill_start_timestamp=get_current_timestamp(),
        dynamic_partitions_store=instance,
    )
    backfill = PartitionBackfill(
        backfill_id=backfill_id,
        status=BulkActionStatus.REQUESTED,
        from_failure=False,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=[dg.AssetKey(["does_not_exist"])],
        serialized_asset_backfill_data=None,
        asset_backfill_data=asset_backfill_data,
        title=None,
        description=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("simple_fan_out_backfill")
    # No retries because of the nature of the error
    assert backfill.status == BulkActionStatus.FAILING
    assert backfill.failure_count == 1
    assert isinstance(backfill.error, SerializableErrorInfo)
    assert backfill.backfill_end_timestamp is None
    # once more iteration to ensure all launched runs are canceled, then the backfill is marked failed
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill("simple_fan_out_backfill")
    assert backfill.status == BulkActionStatus.FAILED
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_retryable_error(instance, workspace_context):
    """An asset backfill that hits a generic (retryable) submission error stays
    REQUESTED and increments its failure count each iteration, until it exceeds
    DAGSTER_MAX_ASSET_BACKFILL_RETRIES and moves to FAILING, then FAILED once
    its runs are canceled.
    """
    asset_selection = [dg.AssetKey("asset_f"), dg.AssetKey("asset_g")]
    asset_graph = workspace_context.create_request_context().asset_graph
    num_partitions = 2
    target_partitions = partitions_f.get_partition_keys()[0:num_partitions]
    backfill_id = "backfill_with_roots_multiple_partitions"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # Every call to get_job_execution_data_from_run_request raises a generic
    # exception, which the daemon treats as transient/retryable (it is neither a
    # DagsterError nor a CheckError), so no runs are ever submitted and the
    # backfill's failure count increments each iteration.
    def raise_retryable_error(*args, **kwargs):
        raise Exception("This is transient because it is not a DagsterError or a CheckError")
    with mock.patch(
        "dagster._core.execution.submit_asset_runs.get_job_execution_data_from_run_request",
        side_effect=raise_retryable_error,
    ):
        with environ({"DAGSTER_MAX_ASSET_BACKFILL_RETRIES": "2"}):
            errors = [
                error
                for error in list(
                    execute_backfill_iteration(
                        workspace_context, get_default_daemon_logger("BackfillDaemon")
                    )
                )
                if error
            ]
            assert len(errors) == 1
            assert "This is transient because it is not a DagsterError or a CheckError" in str(
                errors[0]
            )
            assert instance.get_runs_count() == 0
            updated_backfill = instance.get_backfill(backfill_id)
            assert updated_backfill
            assert updated_backfill.asset_backfill_data
            # Requested with failure_count 1 because it will retry
            assert updated_backfill.status == BulkActionStatus.REQUESTED
            assert updated_backfill.failure_count == 1
            errors = [
                error
                for error in list(
                    execute_backfill_iteration(
                        workspace_context, get_default_daemon_logger("BackfillDaemon")
                    )
                )
                if error
            ]
            assert len(errors) == 1
            updated_backfill = instance.get_backfill(backfill_id)
            assert updated_backfill.status == BulkActionStatus.REQUESTED
            assert updated_backfill.failure_count == 2
            # Fails once it exceeds DAGSTER_MAX_ASSET_BACKFILL_RETRIES retries
            errors = [
                error
                for error in list(
                    execute_backfill_iteration(
                        workspace_context, get_default_daemon_logger("BackfillDaemon")
                    )
                )
                if error
            ]
            assert len(errors) == 1
            updated_backfill = instance.get_backfill(backfill_id)
            assert updated_backfill.status == BulkActionStatus.FAILING
            assert updated_backfill.failure_count == 3
            assert updated_backfill.backfill_end_timestamp is None
            # one more iteration for the backfill to ensure all runs are canceled, then it's marked failed
            list(
                execute_backfill_iteration(
                    workspace_context, get_default_daemon_logger("BackfillDaemon")
                )
            )
            updated_backfill = instance.get_backfill(backfill_id)
            assert updated_backfill.status == BulkActionStatus.FAILED
            assert updated_backfill.backfill_end_timestamp is not None
def test_unloadable_backfill_retry(
    instance, workspace_context, unloadable_location_workspace_context
):
    """With DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR set, a backfill whose
    code location cannot be loaded stays REQUESTED (no failure recorded) and
    proceeds normally once the location becomes loadable again.
    """
    asset_selection = [dg.AssetKey("asset_a"), dg.AssetKey("asset_b"), dg.AssetKey("asset_c")]
    partition_keys = partitions_a.get_partition_keys()
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="retry_backfill",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    with environ({"DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR": "1"}):
        # backfill can't start, but doesn't error
        list(
            execute_backfill_iteration(
                unloadable_location_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
        assert instance.get_runs_count() == 0
        backfill = instance.get_backfill("retry_backfill")
        assert backfill.status == BulkActionStatus.REQUESTED
        # retries, still not loadable
        list(
            execute_backfill_iteration(
                unloadable_location_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
        assert instance.get_runs_count() == 0
        backfill = instance.get_backfill("retry_backfill")
        assert backfill.status == BulkActionStatus.REQUESTED
        # continues once the code location is loadable again
        list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
        assert instance.get_runs_count() == 1
def test_unloadable_failing_backfill_still_cancels_runs(
    instance, workspace_context, unloadable_location_workspace_context
):
    """If a backfill is marked failing or canceling, but the backfill data is no longer loadable,
    we still want to cancel the runs and mark the backfill as completed. However, we won't be able to
    update the asset backfill data.
    """
    asset_selection = [dg.AssetKey("asset_a"), dg.AssetKey("asset_b"), dg.AssetKey("asset_c")]
    partition_keys = partitions_a.get_partition_keys()
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="retry_backfill",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    # seed an in-progress run belonging to the backfill so there is something to cancel
    create_run_for_test(
        instance, tags={BACKFILL_ID_TAG: "retry_backfill"}, status=DagsterRunStatus.STARTED
    )
    runs = instance.get_runs()
    assert len(runs) == 1
    assert runs[0].status == DagsterRunStatus.STARTED
    backfill = instance.get_backfill("retry_backfill")
    updated_backfill = backfill.with_status(BulkActionStatus.FAILING)
    instance.update_backfill(updated_backfill)
    # backfill data will be unloadble, but will still cancel the run this iteration
    list(
        execute_backfill_iteration(
            unloadable_location_workspace_context, get_default_daemon_logger("BackfillDaemon")
        )
    )
    backfill = instance.get_backfill("retry_backfill")
    # stays FAILING until the in-progress run reaches a terminal state
    assert backfill.status == BulkActionStatus.FAILING
    # the `cancel_run` method is not implemented for the SyncInMemoryRunLauncher which is what it used
    # in this test. So manually report the run as canceled
    instance.report_run_canceled(runs[0])
    # on the next iteration, the run has been canceled and the backfill will terminate
    list(
        execute_backfill_iteration(
            unloadable_location_workspace_context, get_default_daemon_logger("BackfillDaemon")
        )
    )
    assert instance.get_runs_count() == 1
    backfill = instance.get_backfill("retry_backfill")
    assert backfill.status == BulkActionStatus.FAILED
    runs = instance.get_runs()
    assert len(runs) == 1
    assert runs[0].status == DagsterRunStatus.CANCELED
def test_backfill_from_partitioned_job(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """Backfilling the first three partitions of a partitioned job creates one
    correctly-tagged run per partition, in partition order.
    """
    all_partition_keys = my_config.partitions_def.get_partition_keys()
    origin = remote_repo.get_partition_set(
        "comp_always_succeed_partition_set"
    ).get_remote_origin()
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="partition_schedule_from_job",
            partition_set_origin=origin,
            status=BulkActionStatus.REQUESTED,
            partition_names=all_partition_keys[:3],
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    assert instance.get_runs_count() == 0
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    # get_runs returns newest-first; reverse to line the runs up with partition order
    oldest_first = list(instance.get_runs())[::-1]
    for partition_key, run in zip(all_partition_keys, oldest_first):
        assert run.tags[BACKFILL_ID_TAG] == "partition_schedule_from_job"
        assert run.tags[PARTITION_NAME_TAG] == partition_key
def test_backfill_with_asset_selection(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A job backfill restricted to an asset selection runs the steps backing
    the selected assets for every partition, one run per partition.
    """
    partition_keys = static_partitions.get_partition_keys()
    selected_assets = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    job_def = the_repo.get_job("standard_partitioned_asset_job")
    assert job_def
    partition_set = remote_repo.get_partition_set(f"{job_def.name}_partition_set")
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="backfill_with_asset_selection",
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=partition_keys,
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
            asset_selection=selected_assets,
        )
    )
    assert instance.get_runs_count() == 0
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    # get_runs returns newest-first; reverse so runs line up with partition order
    for idx, run in enumerate(reversed(list(instance.get_runs()))):
        assert run.tags[BACKFILL_ID_TAG] == "backfill_with_asset_selection"
        assert run.tags[PARTITION_NAME_TAG] == partition_keys[idx]
        # step names backing the selection ("a1" is presumably produced by the
        # "reusable" step -- verify against the repo definition if this changes)
        assert step_succeeded(instance, run, "foo")
        assert step_succeeded(instance, run, "reusable")
        assert step_succeeded(instance, run, "bar")
def test_pure_asset_backfill_with_multiple_assets_selected(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """An asset backfill over multiple selected assets launches one run for the
    first asset, then one run per remaining asset on the next iteration, and all
    runs succeed.

    Bug fix: the final assertions previously wrapped the condition in a
    one-element list -- ``any([expr] for run in runs)`` -- so any()/all()
    iterated over always-truthy lists and the checks could never fail. They now
    assert the condition itself.
    """
    asset_selection = [
        dg.AssetKey("asset_a"),
        dg.AssetKey("asset_b"),
        dg.AssetKey("asset_c"),
        dg.AssetKey("asset_d"),
    ]
    partition_keys = partitions_a.get_partition_keys()
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="backfill_with_multiple_assets_selected",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("backfill_with_multiple_assets_selected")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # first iteration only materializes asset_a
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    run = instance.get_runs()[0]
    assert run.tags[BACKFILL_ID_TAG] == "backfill_with_multiple_assets_selected"
    assert run.tags["custom_tag_key"] == "custom_tag_value"
    assert run.asset_selection == {dg.AssetKey(["asset_a"])}
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # second iteration requests the three remaining assets, one run each
    assert instance.get_runs_count() == 4
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    runs = instance.get_runs()
    # FIXED: previously `any([expr] for ...)` iterated one-element lists, which
    # are always truthy, so these assertions were vacuous.
    assert any(run.asset_selection == {dg.AssetKey(["asset_b"])} for run in runs)
    assert any(run.asset_selection == {dg.AssetKey(["asset_c"])} for run in runs)
    assert any(run.asset_selection == {dg.AssetKey(["asset_d"])} for run in runs)
    assert all(run.status == DagsterRunStatus.SUCCESS for run in runs)
def test_pure_asset_backfill(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A pure asset backfill (no partition set) launches one run per partition,
    propagates custom tags to each run, and is marked COMPLETED_SUCCESS with an
    end timestamp on the iteration after all runs succeed.
    """
    del remote_repo
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="backfill_with_asset_selection",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("backfill_with_asset_selection")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == "backfill_with_asset_selection"
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_succeeded(instance, run, "reusable")
        assert step_succeeded(instance, run, "bar")
    # a follow-up iteration observes all runs finished and completes the backfill
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill("backfill_with_asset_selection")
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_backfill_from_failure_for_subselection(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """Backfilling from failure of a run that used an op subselection preserves
    that subselection in the re-executed child run.
    """
    parallel_failure_job.execute_in_process(
        partition_key="one",
        instance=instance,
        op_selection=["fail_three", "success_four"],
        raise_on_error=False,
    )
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_finish(instance)
    run = next(iter(instance.get_runs()))
    assert run.status == DagsterRunStatus.FAILURE
    partition_set = remote_repo.get_partition_set("parallel_failure_job_partition_set")
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="fromfailure",
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["one"],
            from_failure=True,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
        )
    )
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 2
    # newest run is the re-execution; it must carry the parent's op selection
    child_run = next(iter(instance.get_runs(limit=1)))
    assert child_run.resolved_op_selection == run.resolved_op_selection
    assert child_run.op_selection == run.op_selection
def test_asset_backfill_cancellation(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
):
    """Setting an asset backfill to CANCELING stops further run submission; the
    next iteration marks it CANCELED with an end timestamp and no extra runs.
    """
    asset_selection = [dg.AssetKey("asset_a"), dg.AssetKey("asset_b"), dg.AssetKey("asset_c")]
    partition_keys = partitions_a.get_partition_keys()
    backfill_id = "backfill_with_multiple_assets_selected"
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # first iteration launches a single run for asset_a
    assert instance.get_runs_count() == 1
    run = instance.get_runs()[0]
    assert run.tags[BACKFILL_ID_TAG] == backfill_id
    assert run.asset_selection == {dg.AssetKey(["asset_a"])}
    wait_for_all_runs_to_start(instance, timeout=30)
    # request cancellation before the daemon can submit the downstream assets
    instance.update_backfill(backfill.with_status(BulkActionStatus.CANCELING))
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.CANCELED
    assert instance.get_runs_count() == 1  # Assert that additional runs are not created
    assert backfill.backfill_end_timestamp is not None
# Check run submission at chunk boundary and off of chunk boundary
@pytest.mark.parametrize("num_partitions", [DEFAULT_CHUNK_SIZE * 2, (DEFAULT_CHUNK_SIZE) + 1])
def test_asset_backfill_submit_runs_in_chunks(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    num_partitions: int,
    set_default_chunk_size,
):
    """Chunked run submission requests every targeted partition for each selected
    asset and launches exactly one run per partition, both when the partition
    count is an exact chunk multiple and when it is one past a chunk boundary.
    """
    asset_selection = [dg.AssetKey("daily_1"), dg.AssetKey("daily_2")]
    target_partitions = daily_partitions_def.get_partition_keys()[0:num_partitions]
    backfill_id = f"backfill_with_{num_partitions}_partitions"
    asset_graph = workspace_context.create_request_context().asset_graph
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=asset_graph,
            backfill_id=backfill_id,
            tags={},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=target_partitions,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    backfill = check.not_none(instance.get_backfill(backfill_id))
    # every target partition was recorded as requested for each selected asset
    for asset_key in asset_selection:
        assert (
            backfill.get_asset_backfill_data(asset_graph)
            .requested_subset.get_partitions_subset(asset_key, asset_graph)
            .get_partition_keys()
            == target_partitions
        )
    assert instance.get_runs_count() == num_partitions
def test_asset_backfill_mid_iteration_cancel(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext, set_default_chunk_size
):
    """Canceling an asset backfill after the first chunk of runs is submitted must
    stop further submission: only the first chunk's runs exist, and the persisted
    requested subset covers exactly that chunk.

    Bug fix: the requested-subset check previously read
    ``backfill.asset_backfill_data`` -- the object fetched *before* the
    iteration, whose requested subset was still empty, making the ``all(...)``
    below vacuously true -- instead of the re-fetched ``updated_backfill``.
    """
    asset_selection = [dg.AssetKey("daily_1"), dg.AssetKey("daily_2")]
    asset_graph = workspace_context.create_request_context().asset_graph
    num_partitions = DEFAULT_CHUNK_SIZE * 2
    target_partitions = daily_partitions_def.get_partition_keys()[0:num_partitions]
    backfill_id = f"backfill_with_{num_partitions}_partitions"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    def _override_backfill_cancellation(backfill: PartitionBackfill):
        # Persist the daemon's updated backfill state, but force it into CANCELING
        # so the daemon stops submitting after the current chunk.
        instance._run_storage.update_backfill(  # noqa: SLF001
            backfill.with_status(BulkActionStatus.CANCELING)
        )
    # After submitting the first chunk, update the backfill to be CANCELING
    with mock.patch(
        "dagster._core.instance.DagsterInstance.update_backfill",
        side_effect=_override_backfill_cancellation,
    ):
        assert all(
            not error
            for error in list(
                execute_backfill_iteration(
                    workspace_context, get_default_daemon_logger("BackfillDaemon")
                )
            )
        )
    assert instance.get_runs_count() == DEFAULT_CHUNK_SIZE
    # Check that the requested subset only contains runs that were submitted
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    # FIXED: read the re-fetched backfill, not the stale pre-iteration `backfill`
    updated_asset_backfill_data = check.not_none(updated_backfill.asset_backfill_data)
    assert all(
        len(partitions_subset) == DEFAULT_CHUNK_SIZE
        for partitions_subset in updated_asset_backfill_data.requested_subset.partitions_subsets_by_asset_key.values()
    )
    # Execute backfill iteration again, confirming that no new runs have been added
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == DEFAULT_CHUNK_SIZE
    assert instance.get_runs_count(dg.RunsFilter(statuses=IN_PROGRESS_RUN_STATUSES)) == 0
def test_asset_backfill_forcible_mark_as_canceled_during_canceling_iteration(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """If a CANCELING backfill is forcibly marked CANCELED while the cancellation
    iteration is mid-flight, the daemon keeps the forced CANCELED status.
    """
    asset_selection = [dg.AssetKey("daily_1"), dg.AssetKey("daily_2")]
    asset_graph = workspace_context.create_request_context().asset_graph
    backfill_id = "backfill_id"
    # start the backfill already in the CANCELING state
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=["2023-01-01"],
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    ).with_status(BulkActionStatus.CANCELING)
    instance.add_backfill(
        # Add some partitions in a "requested" state to mock that certain partitions are hanging
        backfill.with_asset_backfill_data(
            backfill.asset_backfill_data._replace(  # pyright: ignore[reportOptionalMemberAccess]
                requested_subset=AssetGraphSubset(
                    non_partitioned_asset_keys={dg.AssetKey("daily_1")}
                )
            ),
            dynamic_partitions_store=instance,
            asset_graph=asset_graph,
        )
    )
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.CANCELING
    override_get_backfill_num_calls = 0
    # First get_backfill call returns the CANCELING backfill; the second simulates
    # an external, forcible cancellation happening mid-iteration.
    def _override_get_backfill(_):
        nonlocal override_get_backfill_num_calls
        if override_get_backfill_num_calls == 1:
            # Mark backfill as canceled during the middle of the cancellation iteration
            override_get_backfill_num_calls += 1
            return backfill.with_status(BulkActionStatus.CANCELED)
        else:
            override_get_backfill_num_calls += 1
            return backfill
    # After submitting the first chunk, update the backfill to be CANCELING
    with mock.patch(
        "dagster._core.instance.DagsterInstance.get_backfill",
        side_effect=_override_get_backfill,
    ):
        # Mock that a run is still in progress. If we don't add this, then the backfill will be
        # marked as failed
        with mock.patch("dagster._core.instance.DagsterInstance.get_run_ids", side_effect=["fake"]):
            assert all(
                not error
                for error in list(
                    execute_backfill_iteration(
                        workspace_context, get_default_daemon_logger("BackfillDaemon")
                    )
                )
            )
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    # Assert that the backfill was indeed marked as canceled
    assert updated_backfill.status == BulkActionStatus.CANCELED
def test_asset_backfill_mid_iteration_code_location_unreachable_error(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """Verify the daemon recovers from a DagsterUserCodeUnreachableError raised
    mid-chunk, after the backfill has already requested its root assets.

    The first iteration submits asset_a's single partition normally. The second
    iteration submits one of asset_e's partitions, then hits the unreachable-code-server
    error; the backfill surfaces one error, does not increment failure_count, and the
    unsubmitted run requests remain queued so a later iteration can submit them.
    """
    from dagster._core.execution.submit_asset_runs import get_job_execution_data_from_run_request
    asset_selection = [dg.AssetKey("asset_a"), dg.AssetKey("asset_e")]
    asset_graph = workspace_context.create_request_context().asset_graph
    num_partitions = 1
    target_partitions = partitions_a.get_partition_keys()[0:num_partitions]
    backfill_id = "simple_fan_out_backfill"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.failure_count == 0
    # First iteration: no errors; requests asset_a's single partition.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 1
    )
    assert instance.get_runs_count() == 1
    # The following backfill iteration will attempt to submit run requests for asset_e's three partitions.
    # The first call to get_job_execution_data_from_run_request will succeed, but the second call will
    # raise a DagsterUserCodeUnreachableError. Subsequently only the first partition will be successfully
    # submitted.
    counter = 0
    async def raise_code_unreachable_error_on_second_call(*args, **kwargs):
        # Stateful side_effect: succeed once, fail once, then hard-fail the test
        # if the daemon keeps submitting after the unreachable error.
        nonlocal counter
        if counter == 0:
            counter += 1
            return await get_job_execution_data_from_run_request(*args, **kwargs)
        elif counter == 1:
            counter += 1
            raise DagsterUserCodeUnreachableError()
        else:
            # Should not attempt to create a run for the third partition if the second
            # errored with DagsterUserCodeUnreachableError
            raise Exception("Should not reach")
    with mock.patch(
        "dagster._core.execution.submit_asset_runs.get_job_execution_data_from_run_request",
        side_effect=raise_code_unreachable_error_on_second_call,
    ):
        errors = [
            error
            for error in list(
                execute_backfill_iteration(
                    workspace_context, get_default_daemon_logger("BackfillDaemon")
                )
            )
            if error
        ]
        assert len(errors) == 1
        assert (
            "Unable to reach the code server. Backfill will resume once the code server is available"
            in str(errors[0])
        )
    # Only the run submitted before the error landed (asset_a's run plus one asset_e run).
    assert instance.get_runs_count() == 2
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert (
        updated_backfill.failure_count == 0
    )  # because of the nature of the error, failure count not incremented
    # Runs were still removed off the list of submitting run requests because the error was
    # caught and the backfill data updated
    assert len(updated_backfill.submitting_run_requests) == 2
    assert len(updated_backfill.reserved_run_ids) == 2
    assert updated_backfill.asset_backfill_data
    assert (
        updated_backfill.asset_backfill_data.materialized_subset.num_partitions_and_non_partitioned_assets
        == 1
    )
    # Requested subset still updated since the error was caught and the backfill data updated
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 2
    )
    # Execute backfill iteration again, confirming that the two partitions that did not submit runs
    # on the previous iteration are requested on this iteration.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # Assert that two new runs are submitted
    assert instance.get_runs_count() == 4
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 4
    )
def test_asset_backfill_first_iteration_code_location_unreachable_error_no_runs_submitted(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """Verify recovery when the code server is unreachable on the very first tick,
    before ANY run for the target root assets has been submitted.

    The first iteration errors with zero runs launched; the run request stays in
    submitting_run_requests and requested_subset remains empty. The next iteration
    retries and successfully submits the root partition.
    """
    # tests that we can recover from unreachable code location error during the first tick when
    # we are requesting the root assets
    asset_selection = [dg.AssetKey("asset_a"), dg.AssetKey("asset_e")]
    asset_graph = workspace_context.create_request_context().asset_graph
    num_partitions = 1
    target_partitions = partitions_a.get_partition_keys()[0:num_partitions]
    backfill_id = "backfill_with_roots"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # The following backfill iteration will attempt to submit run requests for asset_a's partition.
    # The call will raise a DagsterUserCodeUnreachableError and no runs will be submitted
    def raise_code_unreachable_error(*args, **kwargs):
        raise DagsterUserCodeUnreachableError()
    with mock.patch(
        "dagster._core.execution.submit_asset_runs.get_job_execution_data_from_run_request",
        side_effect=raise_code_unreachable_error,
    ):
        errors = [
            error
            for error in list(
                execute_backfill_iteration(
                    workspace_context, get_default_daemon_logger("BackfillDaemon")
                )
            )
            if error
        ]
        assert len(errors) == 1
        assert (
            "Unable to reach the code server. Backfill will resume once the code server is available"
            in str(errors[0])
        )
    # No run was launched before the error, so nothing was submitted at all.
    assert instance.get_runs_count() == 0
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    assert len(updated_backfill.submitting_run_requests) == 1
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 0  # chunk did not finish, so requested_subset was not updated
    )
    assert updated_backfill.asset_backfill_data.requested_runs_for_target_roots
    # Execute backfill iteration again, confirming that the partition for asset_a is requested again
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # Assert that one run is submitted
    assert instance.get_runs_count() == 1
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 1
    )
def test_asset_backfill_first_iteration_code_location_unreachable_error_some_runs_submitted(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """Verify recovery when the code server becomes unreachable on the first tick
    AFTER some (but not all) root-asset runs were submitted.

    Of asset_f's two target partitions, only the first submits before the error.
    The backfill keeps the remaining run request queued and finishes it on the
    following iteration.
    """
    # tests that we can recover from unreachable code location error during the first tick when
    # we are requesting the root assets
    from dagster._core.execution.submit_asset_runs import get_job_execution_data_from_run_request
    asset_selection = [dg.AssetKey("asset_f"), dg.AssetKey("asset_g")]
    asset_graph = workspace_context.create_request_context().asset_graph
    num_partitions = 2
    target_partitions = partitions_f.get_partition_keys()[0:num_partitions]
    backfill_id = "backfill_with_roots_multiple_partitions"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # The following backfill iteration will attempt to submit run requests for asset_f's two partitions.
    # The first call to get_job_execution_data_from_run_request will succeed, but the second call will
    # raise a DagsterUserCodeUnreachableError. Subsequently only the first partition will be successfully
    # submitted.
    counter = 0
    async def raise_code_unreachable_error_on_second_call(*args, **kwargs):
        # Stateful side_effect: first call passes through, second raises, any
        # further call means the daemon wrongly kept submitting after the error.
        nonlocal counter
        if counter == 0:
            counter += 1
            return await get_job_execution_data_from_run_request(*args, **kwargs)
        elif counter == 1:
            counter += 1
            raise DagsterUserCodeUnreachableError()
        else:
            # Should not attempt to create a run for the third partition if the second
            # errored with DagsterUserCodeUnreachableError
            raise Exception("Should not reach")
    with mock.patch(
        "dagster._core.execution.submit_asset_runs.get_job_execution_data_from_run_request",
        side_effect=raise_code_unreachable_error_on_second_call,
    ):
        errors = [
            error
            for error in list(
                execute_backfill_iteration(
                    workspace_context, get_default_daemon_logger("BackfillDaemon")
                )
            )
            if error
        ]
        assert len(errors) == 1
        assert (
            "Unable to reach the code server. Backfill will resume once the code server is available"
            in str(errors[0])
        )
    # Only the first of the two partitions submitted a run before the failure.
    assert instance.get_runs_count() == 1
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    # error was caught and the submitting run requests and backfill data were updated with
    # what was submitted before the failure
    assert len(updated_backfill.submitting_run_requests or []) == 1
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 1
    )
    assert updated_backfill.asset_backfill_data.requested_runs_for_target_roots
    # Execute backfill iteration again, confirming that the remaining partition for asset_f is requested again
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # Assert that one run is submitted
    assert instance.get_runs_count() == 2
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    # Chunk finished so requested_subset is now updated
    assert (
        updated_backfill.asset_backfill_data.requested_subset.num_partitions_and_non_partitioned_assets
        == 2
    )
    assert updated_backfill.asset_backfill_data.requested_runs_for_target_roots
def test_backfill_warns_when_runs_completed_but_partitions_marked_as_in_progress(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext, caplog
):
    """When a canceling backfill's runs have all completed but the stored backfill
    data still shows unmaterialized ("hanging") partitions, the daemon should log a
    warning and still finalize the backfill as CANCELED rather than erroring.

    The hanging state is simulated by wiping materialized_subset after the runs
    finish, then flipping the backfill to CANCELING.
    """
    asset_selection = [dg.AssetKey("daily_1"), dg.AssetKey("daily_2")]
    asset_graph = workspace_context.create_request_context().asset_graph
    target_partitions = ["2023-01-01"]
    backfill_id = "backfill_with_hanging_partitions"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=target_partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # First iteration: submits the single run for the targeted partition.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 1
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    # Second iteration: observes run completion and records materializations.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert updated_backfill.asset_backfill_data
    assert len(updated_backfill.asset_backfill_data.materialized_subset) == 2
    # Replace materialized_subset with an empty subset to mock "hanging" partitions
    # Mark the backfill as CANCELING
    instance.update_backfill(
        updated_backfill.with_asset_backfill_data(
            updated_backfill.asset_backfill_data._replace(materialized_subset=AssetGraphSubset()),
            dynamic_partitions_store=instance,
            asset_graph=asset_graph,
        ).with_status(BulkActionStatus.CANCELING)
    )
    errors = list(
        filter(
            lambda e: e is not None,
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            ),
        )
    )
    assert len(errors) == 0
    updated_backfill = check.not_none(instance.get_backfill(backfill_id))
    assert updated_backfill.status == BulkActionStatus.CANCELED
    logs = caplog.text
    assert (
        "All runs have completed, but not all requested partitions have been marked as materialized or failed"
    ) in logs
def _get_abcd_job_backfill(remote_repo: RemoteRepository, job_name: str) -> PartitionBackfill:
    """Build a REQUESTED job backfill (id "simple") over partitions a-d.

    The named job must have a partitions definition containing partitions
    "a", "b", "c", and "d".
    """
    partition_set = remote_repo.get_partition_set(f"{job_name}_partition_set")
    origin = partition_set.get_remote_origin()
    return PartitionBackfill(
        backfill_id="simple",
        partition_set_origin=origin,
        status=BulkActionStatus.REQUESTED,
        partition_names=list("abcd"),
        from_failure=False,
        reexecution_steps=None,
        tags=None,
        backfill_timestamp=get_current_timestamp(),
    )
def test_asset_job_backfill_single_run(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A BackfillPolicy.single_run asset job backfill launches exactly one run
    spanning the full a-d partition range, carrying the job's custom tags."""
    job_backfill = _get_abcd_job_backfill(remote_repo, "bp_single_run_asset_job")
    assert instance.get_runs_count() == 0
    instance.add_backfill(job_backfill)
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 1
    (launched_run,) = instance.get_runs()
    expected_tags = {
        BACKFILL_ID_TAG: "simple",
        ASSET_PARTITION_RANGE_START_TAG: "a",
        ASSET_PARTITION_RANGE_END_TAG: "d",
        "alpha": "beta",
    }
    for tag_key, tag_value in expected_tags.items():
        assert launched_run.tags[tag_key] == tag_value
def test_asset_job_backfill_single_run_multiple_iterations(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """Tests that job backfills correctly find existing runs for partitions in the backfill and don't
    relaunch those partitions. This is a regression test for a bug where the backfill would relaunch
    runs for BackfillPolicy.single_run asset jobs since we were incorrectly determining which partitions
    had already been launched.
    """
    backfill = _get_abcd_job_backfill(remote_repo, "bp_single_run_asset_job")
    assert instance.get_runs_count() == 0
    instance.add_backfill(backfill)
    # seed an in progress run. The mimics the backfill daemon having already launched the run for these
    # partitions
    fake_run = create_run_for_test(
        instance=instance,
        status=DagsterRunStatus.STARTED,
        tags={
            **DagsterRun.tags_for_backfill_id("simple"),
            ASSET_PARTITION_RANGE_START_TAG: "a",
            ASSET_PARTITION_RANGE_END_TAG: "d",
        },
    )
    assert instance.get_runs_count() == 1
    run = instance.get_runs()[0]
    assert run.tags[BACKFILL_ID_TAG] == "simple"
    assert run.tags[ASSET_PARTITION_RANGE_START_TAG] == "a"
    assert run.tags[ASSET_PARTITION_RANGE_END_TAG] == "d"
    for _ in range(3):  # simulate the daemon ticking a few times
        list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
        # backfill should not create any new runs
        assert instance.get_runs_count() == 1
    # manually update the run to be in a finished state, backfill should be marked complete on next iteration
    instance.delete_run(fake_run.run_id)
    create_run_for_test(
        instance=instance,
        status=DagsterRunStatus.SUCCESS,
        tags={
            **DagsterRun.tags_for_backfill_id("simple"),
            ASSET_PARTITION_RANGE_START_TAG: "a",
            ASSET_PARTITION_RANGE_END_TAG: "d",
        },
    )
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    # Still exactly one run (the replacement SUCCESS run) — nothing relaunched.
    assert instance.get_runs_count() == 1
    backfill = instance.get_backfill("simple")
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_asset_job_backfill_multi_run(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A multi-run backfill policy splits the a-d range into two runs of two
    partitions each (newest run first in instance.get_runs())."""
    job_backfill = _get_abcd_job_backfill(remote_repo, "bp_multi_run_asset_job")
    assert instance.get_runs_count() == 0
    instance.add_backfill(job_backfill)
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 2
    newest_run, older_run = instance.get_runs()
    # Runs are returned newest-first: the c-d chunk was launched last.
    for launched, (range_start, range_end) in zip(
        (newest_run, older_run), (("c", "d"), ("a", "b"))
    ):
        assert launched.tags[BACKFILL_ID_TAG] == "simple"
        assert launched.tags[ASSET_PARTITION_RANGE_START_TAG] == range_start
        assert launched.tags[ASSET_PARTITION_RANGE_END_TAG] == range_end
def test_asset_job_backfill_default(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """With no backfill policy, each of the four partitions gets its own run
    (newest-first ordering: d, c, b, a)."""
    job_backfill = _get_abcd_job_backfill(remote_repo, "bp_none_asset_job")
    assert instance.get_runs_count() == 0
    instance.add_backfill(job_backfill)
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 4
    launched_runs = instance.get_runs()
    assert [r.tags[BACKFILL_ID_TAG] for r in launched_runs] == ["simple"] * 4
    # get_runs() is newest-first, so the last-launched partition ("d") comes first.
    assert [r.tags[PARTITION_NAME_TAG] for r in launched_runs] == ["d", "c", "b", "a"]
def test_asset_backfill_with_single_run_backfill_policy(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """An asset backfill built via from_partitions_by_assets for an asset with a
    single-run backfill policy launches exactly one run tagged with the full
    partition range (first through last key)."""
    partitions = ["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04", "2023-01-05"]
    asset_graph = workspace_context.create_request_context().asset_graph
    backfill_id = "asset_backfill_with_backfill_policy"
    backfill = PartitionBackfill.from_partitions_by_assets(
        backfill_id=backfill_id,
        asset_graph=asset_graph,
        backfill_timestamp=get_current_timestamp(),
        tags={},
        dynamic_partitions_store=instance,
        partitions_by_assets=[
            PartitionsByAssetSelector(
                asset_key=asset_with_single_run_backfill_policy.key,
                partitions=PartitionsSelector(
                    [PartitionRangeSelector(partitions[0], partitions[-1])]
                ),
            )
        ],
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.asset_selection == [asset_with_single_run_backfill_policy.key]
    # One iteration should submit the single ranged run without errors.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 1
    assert instance.get_runs()[0].tags.get(ASSET_PARTITION_RANGE_START_TAG) == partitions[0]
    assert instance.get_runs()[0].tags.get(ASSET_PARTITION_RANGE_END_TAG) == partitions[-1]
def test_asset_backfill_from_asset_graph_subset_with_single_run_backfill_policy(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """Same single-run-policy behavior as above, but with the backfill built from
    an explicit AssetGraphSubset instead of partition selectors: still one run
    covering the whole partition range."""
    partitions = ["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04", "2023-01-05"]
    backfill_id = "asset_backfill_from_asset_graph_subset_with_backfill_policy"
    asset_graph_subset = AssetGraphSubset.from_asset_partition_set(
        asset_partitions_set={
            AssetKeyPartitionKey(asset_with_single_run_backfill_policy.key, pk) for pk in partitions
        },
        asset_graph=workspace_context.create_request_context().asset_graph,
    )
    backfill = PartitionBackfill.from_asset_graph_subset(
        backfill_id=backfill_id,
        asset_graph_subset=asset_graph_subset,
        backfill_timestamp=get_current_timestamp(),
        tags={},
        dynamic_partitions_store=instance,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.asset_selection == [asset_with_single_run_backfill_policy.key]
    # One iteration should submit the single ranged run without errors.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 1
    assert instance.get_runs()[0].tags.get(ASSET_PARTITION_RANGE_START_TAG) == partitions[0]
    assert instance.get_runs()[0].tags.get(ASSET_PARTITION_RANGE_END_TAG) == partitions[-1]
def test_asset_backfill_with_multi_run_backfill_policy(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """An asset with a multi-run backfill policy launches one run per target
    partition, and the requested subset records every targeted partition."""
    partitions = ["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04"]
    asset_graph = workspace_context.create_request_context().asset_graph
    backfill_id = "asset_backfill_with_multi_run_backfill_policy"
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=[asset_with_multi_run_backfill_policy.key],
        partition_names=partitions,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # One iteration should submit all four runs without errors.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 4
    updated_backfill = instance.get_backfill(backfill_id)
    assert updated_backfill
    assert list(
        check.not_none(
            updated_backfill.asset_backfill_data
        ).requested_subset.iterate_asset_partitions()
    ) == [
        AssetKeyPartitionKey(asset_with_multi_run_backfill_policy.key, partition)
        for partition in partitions
    ]
def test_complex_asset_with_backfill_policy(
    instance: DagsterInstance, workspace_context: WorkspaceProcessContext
):
    """Regression test: a backfill over an upstream asset with a single-run policy
    plus a downstream complex asset should take three iterations — one ranged run
    for the upstream, one ranged run for the downstream once the upstream
    materializes, then completion with COMPLETED_SUCCESS."""
    # repro of bug
    partitions = ["2023-01-01", "2023-01-02", "2023-01-03"]
    asset_graph = workspace_context.create_request_context().asset_graph
    backfill_id = "complex_asset_with_backfills"
    backfill = PartitionBackfill.from_partitions_by_assets(
        backfill_id=backfill_id,
        asset_graph=asset_graph,
        backfill_timestamp=get_current_timestamp(),
        tags={},
        dynamic_partitions_store=instance,
        partitions_by_assets=[
            PartitionsByAssetSelector(
                asset_key=asset_with_single_run_backfill_policy.key,
                partitions=PartitionsSelector(
                    [PartitionRangeSelector(partitions[0], partitions[-1])]
                ),
            ),
            PartitionsByAssetSelector(
                asset_key=complex_asset_with_backfill_policy.key,
                partitions=PartitionsSelector(
                    [PartitionRangeSelector(partitions[0], partitions[-1])]
                ),
            ),
        ],
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.asset_selection == [
        asset_with_single_run_backfill_policy.key,
        complex_asset_with_backfill_policy.key,
    ]
    # Iteration 1: requests only the upstream asset's partitions.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert set(
        check.not_none(backfill.asset_backfill_data).requested_subset.iterate_asset_partitions()
    ) == {
        AssetKeyPartitionKey(asset_with_single_run_backfill_policy.key, partition)
        for partition in partitions
    }
    # 1 run for the full range of the upstream partition
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # Iteration 2: upstream materialized, so the downstream run is now requested.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # 1 run for the full range of the downstream partition
    assert instance.get_runs_count() == 2
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # Iteration 3: everything materialized; backfill finalizes.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_error_code_location(
    caplog, instance, workspace_context, unloadable_location_workspace_context
):
    """Running the daemon against a workspace whose code location fails to load
    should yield a DagsterAssetBackfillDataLoadError naming the missing asset,
    and log the location-load failure."""
    asset_selection = [dg.AssetKey("asset_a")]
    partition_keys = partitions_a.get_partition_keys()
    backfill_id = "dummy_backfill"
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            # Backfill is constructed against the healthy workspace's asset graph...
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    # ...but the iteration executes against the unloadable workspace.
    errors = list(
        execute_backfill_iteration(
            unloadable_location_workspace_context, get_default_daemon_logger("BackfillDaemon")
        )
    )
    assert len(errors) == 1
    assert (
        "dagster._core.errors.DagsterAssetBackfillDataLoadError: Asset AssetKey(['asset_a']) existed at"
        " storage-time, but no longer does. This could be because it's inside a code location"
        " that's failing to load" in errors[0].message  # pyright: ignore[reportOptionalMemberAccess]
    )
    assert "Failure loading location" in caplog.text
@pytest.mark.parametrize("backcompat_serialization", [True, False])
def test_raise_error_on_asset_backfill_partitions_defs_changes(
    caplog,
    instance,
    partitions_defs_changes_location_1_workspace_context,
    partitions_defs_changes_location_2_workspace_context,
    backcompat_serialization: bool,
):
    """A backfill stored against location 1's time partitions definition should
    error when executed against location 2, where that definition has changed.

    The backcompat variant stores the backfill in the legacy serialized form
    (serialized_asset_backfill_data) and expects a deserialization-style error;
    the modern form detects the specific removed partition instead.
    """
    asset_selection = [dg.AssetKey("time_partitions_def_changes")]
    partition_keys = ["2023-01-01"]
    backfill_id = "dummy_backfill"
    asset_graph = (
        partitions_defs_changes_location_1_workspace_context.create_request_context().asset_graph
    )
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=partition_keys,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    if backcompat_serialization:
        # Downgrade to the pre-asset_backfill_data storage format.
        backfill = backfill._replace(
            serialized_asset_backfill_data=check.not_none(backfill.asset_backfill_data).serialize(
                instance, asset_graph
            ),
            asset_backfill_data=None,
        )
    instance.add_backfill(backfill)
    errors = list(
        execute_backfill_iteration(
            partitions_defs_changes_location_2_workspace_context,
            get_default_daemon_logger("BackfillDaemon"),
        )
    )
    assert len(errors) == 1
    error_msg = check.not_none(errors[0]).message
    if backcompat_serialization:
        assert ("partitions definition has changed") in error_msg or (
            "partitions definition for asset AssetKey(['time_partitions_def_changes']) has changed"
        ) in error_msg
    else:
        # doesn't have deser issues but does detect that the partition was removed
        assert ("The following partitions were removed: ['2023-01-01']") in error_msg
@pytest.mark.parametrize("backcompat_serialization", [True, False])
def test_raise_error_on_partitions_defs_removed(
    caplog,
    instance,
    partitions_defs_changes_location_1_workspace_context,
    partitions_defs_changes_location_2_workspace_context,
    backcompat_serialization: bool,
):
    """A backfill targeting an asset whose partitions definition was entirely
    removed in location 2 should surface a single error saying the asset had a
    PartitionsDefinition at storage-time but no longer does — in both the legacy
    serialized and the modern storage formats."""
    asset_selection = [dg.AssetKey("partitions_def_removed")]
    partition_keys = ["2023-01-01"]
    backfill_id = "dummy_backfill"
    asset_graph = (
        partitions_defs_changes_location_1_workspace_context.create_request_context().asset_graph
    )
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=partition_keys,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    if backcompat_serialization:
        # Downgrade to the pre-asset_backfill_data storage format.
        backfill = backfill._replace(
            serialized_asset_backfill_data=check.not_none(backfill.asset_backfill_data).serialize(
                instance, asset_graph
            ),
            asset_backfill_data=None,
        )
    instance.add_backfill(backfill)
    errors = [
        e
        for e in execute_backfill_iteration(
            partitions_defs_changes_location_2_workspace_context,
            get_default_daemon_logger("BackfillDaemon"),
        )
        if e is not None
    ]
    assert len(errors) == 1
    assert ("had a PartitionsDefinition at storage-time, but no longer does") in errors[0].message
def test_raise_error_on_target_static_partition_removed(
    caplog,
    instance,
    partitions_defs_changes_location_1_workspace_context,
    partitions_defs_changes_location_2_workspace_context,
):
    """A changed static partitions definition only errors when a partition the
    backfill actually targets was removed: backfilling the surviving partition
    "a" succeeds, while backfilling the removed partition "c" raises."""
    asset_selection = [dg.AssetKey("static_partition_removed")]
    partition_keys = ["a"]
    asset_graph = (
        partitions_defs_changes_location_1_workspace_context.create_request_context().asset_graph
    )
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id="dummy_backfill",
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=partition_keys,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    # When a static partitions def is changed, but all target partitions still exist,
    # backfill executes successfully
    errors = [
        e
        for e in execute_backfill_iteration(
            partitions_defs_changes_location_2_workspace_context,
            get_default_daemon_logger("BackfillDaemon"),
        )
        if e is not None
    ]
    assert len(errors) == 0
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id="dummy_backfill_2",
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=["c"],
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    # When a static partitions def is changed, but any target partitions is removed,
    # error is raised
    errors = [
        e
        for e in execute_backfill_iteration(
            partitions_defs_changes_location_2_workspace_context,
            get_default_daemon_logger("BackfillDaemon"),
        )
        if e is not None
    ]
    assert len(errors) == 1
    assert ("The following partitions were removed: {'c'}.") in errors[0].message
def test_partitions_def_changed_backfill_retry_envvar_set(
    caplog,
    instance,
    partitions_defs_changes_location_1_workspace_context,
    partitions_defs_changes_location_2_workspace_context,
):
    """With DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR set, a changed
    partitions definition still surfaces as a single iteration error whose
    message names the removed partitions."""
    asset_selection = [dg.AssetKey("time_partitions_def_changes")]
    partition_keys = ["2023-01-01"]
    backfill_id = "dummy_backfill"
    asset_graph = (
        partitions_defs_changes_location_1_workspace_context.create_request_context().asset_graph
    )
    backfill = PartitionBackfill.from_asset_partitions(
        asset_graph=asset_graph,
        backfill_id=backfill_id,
        tags={},
        backfill_timestamp=get_current_timestamp(),
        asset_selection=asset_selection,
        partition_names=partition_keys,
        dynamic_partitions_store=instance,
        all_partitions=False,
        title=None,
        description=None,
        run_config=None,
    )
    instance.add_backfill(backfill)
    with environ({"DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR": "1"}):
        errors = list(
            execute_backfill_iteration(
                partitions_defs_changes_location_2_workspace_context,
                get_default_daemon_logger("BackfillDaemon"),
            )
        )
        assert len(errors) == 1
        error_msg = check.not_none(errors[0]).message
        # The full message already ends with the removed-partition list, so this
        # single assertion covers both the header and the partition detail.
        # (A previously-duplicated substring assertion was removed as redundant.)
        assert (
            "Targeted partitions for asset AssetKey(['time_partitions_def_changes']) have been removed since this backfill was stored. The following partitions were removed: ['2023-01-01']"
        ) in error_msg
def test_asset_backfill_logging(caplog, instance, workspace_context):
    """The daemon's first iteration over a multi-asset backfill should emit the
    evaluation log lines: the backfill id, serialized subset/backfill-data state,
    and the 'Asset partitions to request' summary."""
    asset_selection = [
        dg.AssetKey("asset_a"),
        dg.AssetKey("asset_b"),
        dg.AssetKey("asset_c"),
    ]
    partition_keys = partitions_a.get_partition_keys()
    backfill_id = "backfill_with_multiple_assets_selected"
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    # Exact substrings from the daemon's evaluation logging.
    logs = caplog.text
    assert "Evaluating asset backfill backfill_with_multiple_assets_selected" in logs
    assert "DefaultPartitionsSubset(subset={'foo_b'})" in logs
    assert "latest_storage_id=None" in logs
    assert "AssetBackfillData" in logs
    assert (
        """Asset partitions to request:
- asset_a: {foo_a}"""
        in logs
    )
def test_asset_backfill_failure_logging(caplog, instance, workspace_context):
    """A failing asset backfill logs its overall status before and after failure.

    First iteration requests the (always-failing) partitions and logs them as
    "requested or in progress"; after the runs fail, a second iteration logs
    the asset under "Failed assets" and marks the backfill COMPLETED_FAILED.
    """
    # `always_fails` is an asset whose runs always error out.
    asset_selection = [
        dg.AssetKey("always_fails"),
    ]
    partition_keys = static_partitions.get_partition_keys()
    backfill_id = "backfill_with_failure"
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # First iteration: the daemon requests runs and reports no iteration errors.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    logs = caplog.text
    # Immediately after requesting, the asset is listed as requested/in progress.
    assert (
        """Overall backfill status:
**Materialized assets:**
None
**Failed assets and their downstream assets:**
None
**Assets requested or in progress:**
- always_fails:"""
        in logs
    )
    wait_for_all_runs_to_finish(instance)
    # Clear captured logs so the next iteration's status report is isolated.
    caplog.clear()
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_FAILED
    logs = caplog.text
    # After the runs failed, the asset moves to the failed section of the report.
    assert (
        """Overall backfill status:
**Materialized assets:**
None
**Failed assets and their downstream assets:**
- always_fails:"""
    ) in logs
    assert (
        """**Assets requested or in progress:**
None"""
        in logs
    )
def test_backfill_with_title_and_description(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """An asset backfill created with a title/description preserves both through execution.

    Runs two daemon iterations (asset_a first, then its downstream asset_b)
    and verifies the title/description survive and every launched run succeeds.
    """
    asset_selection = [
        dg.AssetKey("asset_a"),
        dg.AssetKey("asset_b"),
    ]
    partition_keys = partitions_a.get_partition_keys()
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="backfill_with_title",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title="Custom title",
            description="this backfill is fancy",
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("backfill_with_title")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.title == "Custom title"
    assert backfill.description == "this backfill is fancy"
    # First iteration: launches the run for asset_a.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # NOTE(review): `backfill` is the snapshot fetched above, not re-fetched here,
    # so these re-assertions confirm the stored object rather than fresh state.
    assert backfill.title == "Custom title"
    assert backfill.description == "this backfill is fancy"
    # Second iteration: asset_a materialized, so the daemon can request asset_b.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 2
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert backfill.title == "Custom title"
    assert backfill.description == "this backfill is fancy"
    runs = instance.get_runs()
    # Fix: the previous `all([run.status == ...] for run in runs)` asserted the
    # truthiness of a non-empty single-element list, so it could never fail.
    # Assert each run's status directly instead.
    assert all(run.status == DagsterRunStatus.SUCCESS for run in runs)
def test_asset_backfill_with_run_config_simple(
    instance: DagsterInstance, run_config_assets_workspace_context: WorkspaceProcessContext
):
    """An asset backfill built via from_asset_graph_subset propagates its run_config.

    Targets four hourly partitions and one daily partition; the first daemon
    iteration launches the four hourly runs, the second launches the daily run
    once its upstream hourly partitions are materialized.
    """
    hourly_partitions_def = dg.HourlyPartitionsDefinition("2023-10-01-00:00")
    daily_partitions_def = dg.DailyPartitionsDefinition("2023-10-01")
    hourly_subset = hourly_partitions_def.empty_subset().with_partition_key_range(
        hourly_partitions_def, dg.PartitionKeyRange("2023-11-01-00:00", "2023-11-01-03:00")
    )
    daily_subset = daily_partitions_def.empty_subset().with_partition_key_range(
        daily_partitions_def, dg.PartitionKeyRange("2023-11-01", "2023-11-01")
    )
    run_config = {
        "ops": {
            "hourly": {"config": {"a": 0}},
            "daily": {"config": {"b": "b"}},
        },
    }
    instance.add_backfill(
        PartitionBackfill.from_asset_graph_subset(
            backfill_id="run_config_backfill",
            backfill_timestamp=get_current_timestamp(),
            tags={},
            asset_graph_subset=AssetGraphSubset(
                partitions_subsets_by_asset_key={
                    AssetKey("hourly"): hourly_subset,
                    AssetKey("daily"): daily_subset,
                }
            ),
            dynamic_partitions_store=instance,
            title="Custom title",
            description="this backfill is fancy",
            run_config=run_config,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("run_config_backfill")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.run_config == run_config
    # First iteration: four hourly-partition runs.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 4
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # Second iteration: the downstream daily partition becomes runnable.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 5
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    runs = instance.get_runs()
    # Fix: the previous `all([run.status == ...] for run in runs)` asserted the
    # truthiness of a non-empty list, so it could never fail. Assert directly.
    assert all(run.status == DagsterRunStatus.SUCCESS for run in runs)
def test_asset_backfill_with_run_config_complex(
    instance: DagsterInstance, run_config_assets_workspace_context: WorkspaceProcessContext
):
    """A multi-layer asset backfill applies its run_config to runs in every layer.

    Three daemon iterations drive C -> middle -> D (4 runs each, 12 total),
    and every launched run must carry the backfill's run_config and succeed.
    """
    daily_partitions_def = dg.DailyPartitionsDefinition("2023-10-01")
    daily_partitions_def_2 = dg.DailyPartitionsDefinition("2023-10-02")
    daily_subset = daily_partitions_def.empty_subset().with_partition_key_range(
        daily_partitions_def, dg.PartitionKeyRange("2023-11-01", "2023-11-04")
    )
    daily_subset_2 = daily_partitions_def_2.empty_subset().with_partition_key_range(
        daily_partitions_def_2, dg.PartitionKeyRange("2023-11-01", "2023-11-04")
    )
    run_config = {
        "ops": {
            "c_and_d_asset": {"config": {"a": 0}},
        },
    }
    instance.add_backfill(
        PartitionBackfill.from_asset_graph_subset(
            backfill_id="run_config_backfill",
            backfill_timestamp=get_current_timestamp(),
            tags={},
            asset_graph_subset=AssetGraphSubset(
                partitions_subsets_by_asset_key={
                    AssetKey("C"): daily_subset,
                    AssetKey("middle"): daily_subset_2,
                    AssetKey("D"): daily_subset,
                }
            ),
            dynamic_partitions_store=instance,
            title="Custom title",
            description="this backfill is fancy",
            run_config=run_config,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("run_config_backfill")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert backfill.run_config == run_config
    # Iteration 1: the four upstream (C) partition runs.
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 4
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # Iteration 2: the middle layer becomes runnable (8 runs total).
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 8
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    # Iteration 3: the final (D) layer becomes runnable (12 runs total).
    assert all(
        not error
        for error in list(
            execute_backfill_iteration(
                run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    )
    assert instance.get_runs_count() == 12
    wait_for_all_runs_to_start(instance, timeout=30)
    wait_for_all_runs_to_finish(instance, timeout=30)
    runs = instance.get_runs()
    # Fix: both previous assertions used `all([expr] for run in runs)`, which
    # tests the truthiness of a non-empty list and can never fail. Assert the
    # per-run conditions directly.
    assert all(run.status == DagsterRunStatus.SUCCESS for run in runs)
    assert all(run.run_config == run_config for run in runs)
def test_job_backfill_with_run_config(
    instance: DagsterInstance,
    run_config_assets_workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A job (partition-set) backfill propagates its run_config to every launched run."""
    # Resolve the single code location registered in the workspace.
    request_context = run_config_assets_workspace_context.create_request_context()
    location_entry = next(iter(request_context.get_code_location_entries().values()))
    location = cast("CodeLocation", location_entry.code_location)

    run_config = {
        "ops": {
            "daily": {"config": {"b": "b"}},
            "other_daily": {"config": {"b": "b"}},
        },
    }
    partition_set = location.get_repository("__repository__").get_partition_set(
        "daily_job_partition_set"
    )
    instance.add_backfill(
        PartitionBackfill(
            backfill_id="run_config_backfill",
            partition_set_origin=partition_set.get_remote_origin(),
            status=BulkActionStatus.REQUESTED,
            partition_names=["2023-11-01", "2023-11-02"],
            from_failure=False,
            reexecution_steps=None,
            tags=None,
            backfill_timestamp=get_current_timestamp(),
            run_config=run_config,
        )
    )
    assert instance.get_runs_count() == 0
    # Drain one daemon iteration so both partition runs get submitted.
    for _ in execute_backfill_iteration(
        run_config_assets_workspace_context, get_default_daemon_logger("BackfillDaemon")
    ):
        pass
    assert instance.get_runs_count() == 2
    # Every launched run must carry the backfill's run_config.
    for launched_run in instance.get_runs():
        assert launched_run.run_config == run_config
def test_old_dynamic_partitions_job_backfill(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A job backfill over the old-style dynamic-partitions job launches one run per partition."""
    backfill = _get_abcd_job_backfill(remote_repo, "old_dynamic_partitions_job")
    assert instance.get_runs_count() == 0
    instance.add_backfill(backfill)
    # Drain the daemon generator so the full iteration executes.
    for _ in execute_backfill_iteration(
        workspace_context, get_default_daemon_logger("BackfillDaemon")
    ):
        pass
    assert instance.get_runs_count() == 4
@pytest.fixture
def instance_with_backfill_log_storage_enabled(instance):
    """Yield *instance* with ``backfill_log_storage_enabled`` patched to return True.

    The original bound method is restored on teardown regardless of test outcome.
    """

    def _always_enabled(self):
        return True

    original_method = instance.backfill_log_storage_enabled
    try:
        # Bind the override to this instance so it behaves like a real method.
        instance.backfill_log_storage_enabled = _always_enabled.__get__(
            instance, dg.DagsterInstance
        )
        yield instance
    finally:
        instance.backfill_log_storage_enabled = original_method
def _assert_structured_log_lines(log_lines):
    """Every non-empty, JSON-parseable captured log line must carry a "msg" field."""
    for log_line in log_lines:
        if not log_line:
            continue
        try:
            record_dict = seven.json.loads(log_line)
        except json.JSONDecodeError:
            # Non-JSON lines (e.g. tracebacks) are tolerated.
            continue
        assert record_dict.get("msg")


def test_asset_backfill_logs(
    instance_with_backfill_log_storage_enabled: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """With backfill log storage enabled, daemon logs are captured and paginated via cursor."""
    instance = instance_with_backfill_log_storage_enabled
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id="backfill_with_asset_selection",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("backfill_with_asset_selection")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=15)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=15)
    # Fix: previously the env var was mutated and never restored, leaking the
    # chunk-size override into subsequent tests in this process. Save and
    # restore it around the log-reading section.
    prior_chunk_size = os.environ.get("DAGSTER_CAPTURED_LOG_CHUNK_SIZE")
    try:
        # Small chunk size forces pagination of the stored backfill logs.
        os.environ["DAGSTER_CAPTURED_LOG_CHUNK_SIZE"] = "20"
        cm = instance.compute_log_manager
        logs, cursor = cm.read_log_lines_for_log_key_prefix(
            ["backfill", backfill.backfill_id], cursor=None, io_type=ComputeIOType.STDERR
        )
        assert cursor is not None
        assert logs
        _assert_structured_log_lines(logs)
        list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
        backfill = instance.get_backfill("backfill_with_asset_selection")
        assert backfill
        assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
        assert backfill.backfill_end_timestamp is not None
        # set num_lines high so we know we get all of the remaining logs
        os.environ["DAGSTER_CAPTURED_LOG_CHUNK_SIZE"] = "100"
        logs, cursor = cm.read_log_lines_for_log_key_prefix(
            ["backfill", backfill.backfill_id],
            cursor=cursor.to_string(),
            io_type=ComputeIOType.STDERR,
        )
        assert cursor is not None
        assert not cursor.has_more_now
        _assert_structured_log_lines(logs)
    finally:
        if prior_chunk_size is None:
            os.environ.pop("DAGSTER_CAPTURED_LOG_CHUNK_SIZE", None)
        else:
            os.environ["DAGSTER_CAPTURED_LOG_CHUNK_SIZE"] = prior_chunk_size
@pytest.mark.parametrize("parallel", [True, False])
def test_asset_backfill_from_asset_graph_subset(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
    parallel: bool,
):
    """A backfill built from an AssetGraphSubset runs to COMPLETED_SUCCESS.

    Parametrized to exercise both the serial daemon path and the threadpool
    (parallel) path of execute_backfill_iteration.
    """
    del remote_repo
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    # Build the subset from the cross product of targeted assets x partitions.
    asset_graph_subset = AssetGraphSubset.from_asset_partition_set(
        asset_partitions_set={
            AssetKeyPartitionKey(ak, pk) for ak in asset_selection for pk in partition_keys
        },
        asset_graph=workspace_context.create_request_context().asset_graph,
    )
    instance.add_backfill(
        PartitionBackfill.from_asset_graph_subset(
            backfill_id="backfill_from_asset_graph_subset",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            dynamic_partitions_store=instance,
            title=None,
            description=None,
            asset_graph_subset=asset_graph_subset,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill("backfill_from_asset_graph_subset")
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    if parallel:
        # In parallel mode the daemon hands work to futures; wait for them all.
        backfill_daemon_futures = {}
        list(
            execute_backfill_iteration(
                workspace_context,
                get_default_daemon_logger("BackfillDaemon"),
                threadpool_executor=ThreadPoolExecutor(2),
                backfill_futures=backfill_daemon_futures,
            )
        )
        wait_for_futures(backfill_daemon_futures)
    else:
        list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        # Each run carries the backfill id and custom tag, and all steps succeed.
        assert run.tags[BACKFILL_ID_TAG] == "backfill_from_asset_graph_subset"
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_succeeded(instance, run, "reusable")
        assert step_succeeded(instance, run, "bar")
    # A second iteration observes the completed runs and finalizes the backfill.
    if parallel:
        backfill_daemon_futures = {}
        list(
            execute_backfill_iteration(
                workspace_context,
                get_default_daemon_logger("BackfillDaemon"),
                threadpool_executor=ThreadPoolExecutor(2),
                backfill_futures=backfill_daemon_futures,
            )
        )
        wait_for_futures(backfill_daemon_futures)
    else:
        list(
            execute_backfill_iteration(
                workspace_context, get_default_daemon_logger("BackfillDaemon")
            )
        )
    backfill = instance.get_backfill("backfill_from_asset_graph_subset")
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_from_asset_graph_subset_with_static_and_time_partitions(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A mixed static + time-partitioned AssetGraphSubset backfill completes successfully.

    Combines 3 static-partition runs with 5 daily-partition runs (8 total) in a
    single backfill and drives it to COMPLETED_SUCCESS over two daemon iterations.
    """
    del remote_repo
    static_partition_keys = static_partitions.get_partition_keys()
    static_asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    static_asset_partition_set = {
        AssetKeyPartitionKey(ak, pk)
        for ak in static_asset_selection
        for pk in static_partition_keys
    }
    time_asset_selection = [dg.AssetKey("daily_1"), dg.AssetKey("daily_2")]
    # Only the first five daily partition keys are targeted.
    time_target_partitions = daily_partitions_def.get_partition_keys()[0:5]
    time_asset_partition_set = {
        AssetKeyPartitionKey(ak, pk) for ak in time_asset_selection for pk in time_target_partitions
    }
    # Union the static and time-based partition sets into one subset.
    asset_graph_subset = AssetGraphSubset.from_asset_partition_set(
        asset_partitions_set=static_asset_partition_set | time_asset_partition_set,
        asset_graph=workspace_context.create_request_context().asset_graph,
    )
    instance.add_backfill(
        PartitionBackfill.from_asset_graph_subset(
            backfill_id="backfill_from_asset_graph_subset_with_static_and_time_partitions",
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            dynamic_partitions_store=instance,
            title=None,
            description=None,
            asset_graph_subset=asset_graph_subset,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(
        "backfill_from_asset_graph_subset_with_static_and_time_partitions"
    )
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    # First iteration launches all 8 runs (3 static + 5 daily).
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 8
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 8
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 8
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert (
            run.tags[BACKFILL_ID_TAG]
            == "backfill_from_asset_graph_subset_with_static_and_time_partitions"
        )
        assert run.tags["custom_tag_key"] == "custom_tag_value"
    # Second iteration observes the finished runs and finalizes the backfill.
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(
        "backfill_from_asset_graph_subset_with_static_and_time_partitions"
    )
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_not_complete_until_retries_complete(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A backfill stays REQUESTED while a manual run retry is in progress.

    After all targeted partitions are materialized, a simulated retry run keeps
    the backfill open; injecting a RUN_SUCCESS event for the retry lets the
    next daemon iteration mark it COMPLETED_SUCCESS.
    """
    del remote_repo
    backfill_id = "run_retries_backfill"
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == backfill_id
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_succeeded(instance, run, "reusable")
        assert step_succeeded(instance, run, "bar")
    # simulate a retry of a run
    run_to_retry = instance.get_runs()[0]
    retried_run = create_run_for_test(
        instance=instance,
        job_name=run_to_retry.job_name,
        tags=run_to_retry.tags,
        root_run_id=run_to_retry.run_id,
        parent_run_id=run_to_retry.run_id,
    )
    # since there is a run in progress, the backfill should not be marked as complete, even though
    # all targeted asset partitions have a completed state
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.asset_backfill_data
    assert backfill.asset_backfill_data.all_targeted_partitions_have_materialization_status()
    assert backfill.status == BulkActionStatus.REQUESTED
    # manually mark the run as successful to show that the backfill will be marked as complete
    # since there are no in progress runs
    instance.handle_new_event(
        dg.EventLogEntry(
            error_info=None,
            level="debug",
            user_message="",
            run_id=retried_run.run_id,
            timestamp=time.time(),
            dagster_event=dg.DagsterEvent(
                event_type_value=DagsterEventType.RUN_SUCCESS.value,
                job_name=retried_run.job_name,
            ),
        )
    )
    # Re-fetch the retried run to confirm the injected event took effect.
    retried_run = instance.get_runs(filters=dg.RunsFilter(run_ids=[retried_run.run_id]))[0]
    assert retried_run.status == DagsterRunStatus.SUCCESS
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_not_complete_if_automatic_retry_could_happen(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A backfill with MAX_RETRIES_TAG stays open while automatic retries are pending.

    `pass_on_retry` fails on its first attempt; the backfill remains REQUESTED
    until the (manually triggered) automatic re-execution succeeds, after which
    the daemon marks it COMPLETED_SUCCESS.
    """
    del remote_repo
    backfill_id = "run_retries_backfill"
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("pass_on_retry")]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value", MAX_RETRIES_TAG: "2"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == backfill_id
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_failed(instance, run, "pass_on_retry")
    # since the failed runs should have automatic retries launched for them, the backfill should not
    # be considered complete, even though the targeted asset partitions have a completed state
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.asset_backfill_data
    assert backfill.asset_backfill_data.all_targeted_partitions_have_materialization_status()
    assert backfill.status == BulkActionStatus.REQUESTED
    # automatic retries wont get automatically run in test environment, so we run the function manually
    runs = instance.get_run_records()
    list(
        consume_new_runs_for_automatic_reexecution(
            workspace_process_context=workspace_context,
            run_records=runs,
            logger=logger,
        )
    )
    wait_for_all_runs_to_finish(instance, timeout=30)
    # 3 original runs + 3 automatic retries.
    assert instance.get_runs_count() == 6
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_fails_if_retries_fail(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A backfill ends COMPLETED_FAILED once all automatic retries are exhausted.

    `always_fails` keeps failing; with MAX_RETRIES_TAG=2 the backfill stays
    REQUESTED through two rounds of automatic re-execution (runs: 3 -> 6 -> 9)
    and only then resolves to COMPLETED_FAILED.
    """
    del remote_repo
    backfill_id = "run_retries_backfill"
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [
        dg.AssetKey("foo"),
        dg.AssetKey("pass_on_retry"),
        dg.AssetKey("always_fails"),
    ]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value", MAX_RETRIES_TAG: "2"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == backfill_id
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_failed(instance, run, "pass_on_retry")
    # since the failed runs should have automatic retries launched for them, the backfill should not
    # be considered complete, even though the targeted asset partitions have a completed state
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.asset_backfill_data
    assert backfill.asset_backfill_data.all_targeted_partitions_have_materialization_status()
    assert backfill.status == BulkActionStatus.REQUESTED
    # First round of automatic re-execution (runs manually in the test env).
    runs = instance.get_run_records()
    list(
        consume_new_runs_for_automatic_reexecution(
            workspace_process_context=workspace_context,
            run_records=runs,
            logger=logger,
        )
    )
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 6
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    # retry limit hasn't been hit, so backfill still in progress
    assert backfill.status == BulkActionStatus.REQUESTED
    # Second round of automatic re-execution exhausts MAX_RETRIES_TAG=2.
    runs = instance.get_run_records()
    list(
        consume_new_runs_for_automatic_reexecution(
            workspace_process_context=workspace_context,
            run_records=runs,
            logger=logger,
        )
    )
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 9
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_FAILED
    assert backfill.backfill_end_timestamp is not None
def test_asset_backfill_retries_make_downstreams_runnable(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A successful retry of a failed upstream re-enables its downstream assets.

    `fails_once_asset_a` fails on the first attempt, putting its two downstreams
    in the failed-and-downstream subset; after the automatic retry succeeds, the
    daemon launches the downstream runs and the backfill completes successfully.
    """
    del remote_repo
    backfill_id = "run_retries_backfill_with_downstream"
    partition_keys = partitions_a.get_partition_keys()
    asset_selection = [
        dg.AssetKey("fails_once_asset_a"),
        dg.AssetKey("downstream_of_fails_once_asset_b"),
        dg.AssetKey("downstream_of_fails_once_asset_c"),
    ]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_context.create_request_context().asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value", MAX_RETRIES_TAG: "2"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 1
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 1
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == backfill_id
        assert step_failed(instance, run, "fails_once_asset_a")
    # if the backfill daemon runs again, we will see that the downstreams are in the failed and downstream subset
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 1
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.asset_backfill_data
    # Nothing materialized yet; the failed asset and both downstreams (3 total)
    # are in the failed-and-downstream subset.
    assert (
        backfill.asset_backfill_data.materialized_subset.num_partitions_and_non_partitioned_assets
        == 0
    )
    assert (
        backfill.asset_backfill_data.failed_and_downstream_subset.num_partitions_and_non_partitioned_assets
        == 3
    )
    # launch a retry of the failed run
    runs = instance.get_run_records()
    list(
        consume_new_runs_for_automatic_reexecution(
            workspace_process_context=workspace_context,
            run_records=runs,
            logger=logger,
        )
    )
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 2
    # now that the failed run has been retried, the backfill daemon can launch runs of the downstream assets
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    assert instance.get_runs_count() == 4
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 4
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 4
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
    assert backfill.asset_backfill_data
    # The retry cleared the failed-and-downstream subset entirely.
    assert (
        backfill.asset_backfill_data.failed_and_downstream_subset.num_partitions_and_non_partitioned_assets
        == 0
    )
def test_run_retry_not_part_of_completed_backfill(
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    workspace_request_context: BaseWorkspaceRequestContext,
    code_location: CodeLocation,
    remote_repo: RemoteRepository,
):
    """A manual re-execution of a run from a finished backfill is not attributed to it.

    After the backfill reaches COMPLETED_SUCCESS, a retry created with
    use_parent_run_tags=True must not inherit backfill tags, must not reopen
    the backfill, and must not appear in the backfill's run filter.
    """
    backfill_id = "run_retries_backfill"
    partition_keys = static_partitions.get_partition_keys()
    asset_selection = [dg.AssetKey("foo"), dg.AssetKey("a1"), dg.AssetKey("bar")]
    instance.add_backfill(
        PartitionBackfill.from_asset_partitions(
            asset_graph=workspace_request_context.asset_graph,
            backfill_id=backfill_id,
            tags={"custom_tag_key": "custom_tag_value"},
            backfill_timestamp=get_current_timestamp(),
            asset_selection=asset_selection,
            partition_names=partition_keys,
            dynamic_partitions_store=instance,
            all_partitions=False,
            title=None,
            description=None,
            run_config=None,
        )
    )
    assert instance.get_runs_count() == 0
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.REQUESTED
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_start(instance, timeout=30)
    assert instance.get_runs_count() == 3
    wait_for_all_runs_to_finish(instance, timeout=30)
    assert instance.get_runs_count() == 3
    runs = reversed(list(instance.get_runs()))
    for run in runs:
        assert run.tags[BACKFILL_ID_TAG] == backfill_id
        assert run.tags["custom_tag_key"] == "custom_tag_value"
        assert step_succeeded(instance, run, "foo")
        assert step_succeeded(instance, run, "reusable")
        assert step_succeeded(instance, run, "bar")
    # Second iteration finalizes the backfill as COMPLETED_SUCCESS.
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
    # simulate a retry of a run
    run_to_retry = instance.get_runs()[0]
    selector = JobSubsetSelector(
        location_name=code_location.name,
        repository_name=remote_repo.name,
        job_name=run_to_retry.job_name,
        asset_selection=run_to_retry.asset_selection,
        op_selection=None,
    )
    remote_job = code_location.get_job(selector)
    retried_run = instance.create_reexecuted_run(
        parent_run=run_to_retry,
        request_context=workspace_request_context,
        code_location=code_location,
        remote_job=remote_job,
        strategy=ReexecutionStrategy.ALL_STEPS,
        run_config=run_to_retry.run_config,
        use_parent_run_tags=True,  # ensures that the logic for not copying over backfill tags is tested
    )
    # The retry must not carry any backfill-identifying tags.
    for tag in BACKFILL_TAGS:
        assert tag not in retried_run.tags.keys()
    # Since the backfill is alerady complete, it should not be processed by the backfill daemon and
    # should remain in a completed state
    list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
    backfill = instance.get_backfill(backfill_id)
    assert backfill
    assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
    assert backfill.backfill_end_timestamp is not None
    # The retry run is excluded from the backfill's run filter.
    assert retried_run.run_id not in [
        r.run_id for r in instance.get_runs(filters=RunsFilter.for_backfill(backfill_id))
    ]
def test_multi_partitioned_asset_backfill(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
):
del remote_repo
num_partitions = 4
target_partitions = multi_partitions_def.get_partition_keys()[0:num_partitions]
asset_selection = [dg.AssetKey("multi_partitioned_asset")]
backfill_id = "backfill_multi_partitions"
instance.add_backfill(
PartitionBackfill.from_asset_partitions(
asset_graph=workspace_context.create_request_context().asset_graph,
backfill_id=backfill_id,
tags={"custom_tag_key": "custom_tag_value"},
backfill_timestamp=get_current_timestamp(),
asset_selection=asset_selection,
partition_names=target_partitions,
dynamic_partitions_store=instance,
all_partitions=False,
title=None,
description=None,
run_config=None,
)
)
assert instance.get_runs_count() == 0
backfill = instance.get_backfill(backfill_id)
assert backfill
assert backfill.status == BulkActionStatus.REQUESTED
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
assert instance.get_runs_count() == num_partitions
wait_for_all_runs_to_start(instance, timeout=30)
assert instance.get_runs_count() == num_partitions
wait_for_all_runs_to_finish(instance, timeout=30)
assert instance.get_runs_count() == num_partitions
runs = reversed(list(instance.get_runs()))
for run in runs:
assert run.tags[BACKFILL_ID_TAG] == backfill_id
assert run.tags["custom_tag_key"] == "custom_tag_value"
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
backfill = instance.get_backfill(backfill_id)
assert backfill is not None
assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
def test_multi_partitioned_asset_with_single_run_bp_backfill(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
):
del remote_repo
target_partitions = ["2023-01-01|x", "2023-01-02|x", "2023-01-01|z", "2023-01-01|y"]
asset_selection = [dg.AssetKey("multi_partitioned_asset_with_single_run_bp")]
backfill_id = "backfill_multi_partitions"
instance.add_backfill(
PartitionBackfill.from_asset_partitions(
asset_graph=workspace_context.create_request_context().asset_graph,
backfill_id=backfill_id,
tags={"custom_tag_key": "custom_tag_value"},
backfill_timestamp=get_current_timestamp(),
asset_selection=asset_selection,
partition_names=target_partitions,
dynamic_partitions_store=instance,
all_partitions=False,
title=None,
description=None,
run_config=None,
)
)
assert instance.get_runs_count() == 0
backfill = instance.get_backfill(backfill_id)
assert backfill
assert backfill.status == BulkActionStatus.REQUESTED
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
backfill = instance.get_backfill(backfill_id)
assert backfill
# even though it is a single run backfill, the multi-partitions selected will span two ranges
# because we compute ranges per-primary key
assert instance.get_runs_count() == 2
wait_for_all_runs_to_start(instance, timeout=30)
assert instance.get_runs_count() == 2
wait_for_all_runs_to_finish(instance, timeout=30)
assert instance.get_runs_count() == 2
runs = reversed(list(instance.get_runs()))
partition_ranges = []
for run in runs:
assert run.tags[BACKFILL_ID_TAG] == backfill_id
partition_ranges.append(
(run.tags[ASSET_PARTITION_RANGE_START_TAG], run.tags[ASSET_PARTITION_RANGE_END_TAG])
)
assert sorted(partition_ranges) == sorted(
[("2023-01-01|x", "2023-01-01|z"), ("2023-01-02|x", "2023-01-02|x")]
)
list(execute_backfill_iteration(workspace_context, get_default_daemon_logger("BackfillDaemon")))
backfill = instance.get_backfill(backfill_id)
assert backfill
wait_for_all_runs_to_start(instance, timeout=30)
wait_for_all_runs_to_finish(instance, timeout=30)
assert backfill.status == BulkActionStatus.COMPLETED_SUCCESS
# assert the expected asset materialization events exist with the expected partitions
result = instance.fetch_materializations(
dg.AssetRecordsFilter(
asset_key=asset_selection[0],
),
limit=10,
)
records_in_backfill = []
for record in result.records:
run = instance.get_run_by_id(record.run_id)
if run and run.tags.get(BACKFILL_ID_TAG) == backfill_id:
records_in_backfill.append(record)
partitions_materialized = {record.partition_key for record in records_in_backfill}
assert partitions_materialized == set(target_partitions)
@pytest.mark.skip("Occasionally hangs indefinitely in CI due to threading deadlock")
def test_threaded_submit_backfill(
instance: DagsterInstance,
workspace_context: WorkspaceProcessContext,
remote_repo: RemoteRepository,
):
job_def = the_repo.get_job("multi_asset_job")
assert job_def
partition_set_name = f"{job_def.name}_partition_set"
partition_set = remote_repo.get_partition_set(partition_set_name)
backfill = PartitionBackfill(
backfill_id="backfill_with_asset_selection",
partition_set_origin=partition_set.get_remote_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=get_current_timestamp(),
)
assert not backfill.is_asset_backfill
instance.add_backfill(backfill)
assert instance.get_runs_count() == 0
with ThreadPoolExecutor(3) as submit_threadpool_executor:
list(
execute_backfill_iteration(
workspace_context,
get_default_daemon_logger("BackfillDaemon"),
threadpool_executor=None,
submit_threadpool_executor=submit_threadpool_executor,
)
)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
partitions = {run.tags[PARTITION_NAME_TAG] for run in runs}
assert partitions == {"one", "two", "three"}
| BpSingleRunConfig |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 141212,
"end": 142129
} | class ____:
name: str
target: str | None
def __init__(self, target: str | None = None):
self.target = target
def __set_name__(self, owner: type, name: str) -> None:
self.name = name
def _warn_deprecated(self) -> None:
warnings.warn(
f"The `Worker.{self.name}` attribute has been moved to "
f"`Worker.state.{self.target or self.name}`",
FutureWarning,
)
def __get__(self, instance: Worker | None, owner: type[Worker]) -> Any:
if instance is None:
# This is triggered by Sphinx
return None # pragma: nocover
self._warn_deprecated()
return getattr(instance.state, self.target or self.name)
def __set__(self, instance: Worker, value: Any) -> None:
self._warn_deprecated()
setattr(instance.state, self.target or self.name, value)
| DeprecatedWorkerStateAttribute |
python | ansible__ansible | test/lib/ansible_test/_internal/core_ci.py | {
"start": 724,
"end": 1435
} | class ____(metaclass=abc.ABCMeta):
"""Base class for Ansible Core CI resources."""
@abc.abstractmethod
def as_tuple(self) -> tuple[str, str, str, str]:
"""Return the resource as a tuple of platform, version, architecture and provider."""
@abc.abstractmethod
def get_label(self) -> str:
"""Return a user-friendly label for this resource."""
@property
@abc.abstractmethod
def persist(self) -> bool:
"""True if the resource is persistent, otherwise false."""
@abc.abstractmethod
def get_config(self, core_ci: AnsibleCoreCI) -> dict[str, object]:
"""Return the configuration for this resource."""
@dataclasses.dataclass(frozen=True)
| Resource |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 22497,
"end": 28970
} | class ____:
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
statement: Optional[ClauseElement] = None
"The statement to compile."
string: str = ""
"The string representation of the ``statement``"
state: CompilerState
"""description of the compiler's state"""
is_sql = False
is_ddl = False
_cached_metadata: Optional[CursorResultMetaData] = None
_result_columns: Optional[List[ResultColumnsEntry]] = None
schema_translate_map: Optional[SchemaTranslateMapType] = None
execution_options: _ExecuteOptions = util.EMPTY_DICT
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
preparer: IdentifierPreparer
_annotations: _AnnotationDict = util.EMPTY_DICT
compile_state: Optional[CompileState] = None
"""Optional :class:`.CompileState` object that maintains additional
state used by the compiler.
Major executable objects such as :class:`_expression.Insert`,
:class:`_expression.Update`, :class:`_expression.Delete`,
:class:`_expression.Select` will generate this
state when compiled in order to calculate additional information about the
object. For the top level object that is to be executed, the state can be
stored here where it can also have applicability towards result set
processing.
.. versionadded:: 1.4
"""
dml_compile_state: Optional[CompileState] = None
"""Optional :class:`.CompileState` assigned at the same point that
.isinsert, .isupdate, or .isdelete is assigned.
This will normally be the same object as .compile_state, with the
exception of cases like the :class:`.ORMFromStatementCompileState`
object.
.. versionadded:: 1.4.40
"""
cache_key: Optional[CacheKey] = None
"""The :class:`.CacheKey` that was generated ahead of creating this
:class:`.Compiled` object.
This is used for routines that need access to the original
:class:`.CacheKey` instance generated when the :class:`.Compiled`
instance was first cached, typically in order to reconcile
the original list of :class:`.BindParameter` objects with a
per-statement list that's generated on each call.
"""
_gen_time: float
"""Generation time of this :class:`.Compiled`, used for reporting
cache stats."""
def __init__(
self,
dialect: Dialect,
statement: Optional[ClauseElement],
schema_translate_map: Optional[SchemaTranslateMapType] = None,
render_schema_translate: bool = False,
compile_kwargs: Mapping[str, Any] = util.immutabledict(),
):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`_expression.ClauseElement` to be compiled.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.schema_translate_map = schema_translate_map
self.preparer = self.preparer._with_schema_translate(
schema_translate_map
)
if statement is not None:
self.state = CompilerState.COMPILING
self.statement = statement
self.can_execute = statement.supports_execution
self._annotations = statement._annotations
if self.can_execute:
if TYPE_CHECKING:
assert isinstance(statement, Executable)
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
if render_schema_translate:
assert schema_translate_map is not None
self.string = self.preparer._render_schema_translates(
self.string, schema_translate_map
)
self.state = CompilerState.STRING_APPLIED
else:
self.state = CompilerState.NO_STATEMENT
self._gen_time = perf_counter()
def __init_subclass__(cls) -> None:
cls._init_compiler_cls()
return super().__init_subclass__()
@classmethod
def _init_compiler_cls(cls):
pass
def visit_unsupported_compilation(self, element, err, **kw):
raise exc.UnsupportedCompilationError(self, type(element)) from err
@property
def sql_compiler(self) -> SQLCompiler:
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj: Visitable, **kwargs: Any) -> str:
return obj._compiler_dispatch(self, **kwargs)
def __str__(self) -> str:
"""Return the string text of the generated SQL or DDL."""
if self.state is CompilerState.STRING_APPLIED:
return self.string
else:
return ""
def construct_params(
self,
params: Optional[_CoreSingleExecuteParams] = None,
extracted_parameters: Optional[Sequence[BindParameter[Any]]] = None,
escape_names: bool = True,
) -> Optional[_MutableCoreSingleExecuteParams]:
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
| Compiled |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 3429,
"end": 4058
} | class ____:
"""Test fi_FI company provider methods"""
def _has_valid_checksum(self, company_id):
factors = [7, 9, 10, 5, 8, 4, 2]
checksum = 0
for x, y in zip(company_id[:-2], factors):
checksum += int(x) * y
checksum %= 11
checksum = 11 - checksum if checksum else 0
return int(company_id[-1]) == checksum
def test_company_business_id(self, faker, num_samples):
for _ in range(num_samples):
company_id = faker.company_business_id()
assert len(company_id) == 9
assert self._has_valid_checksum(company_id)
| TestFiFi |
python | django__django | tests/forms_tests/field_tests/test_filefield.py | {
"start": 407,
"end": 4608
} | class ____(SimpleTestCase):
def test_filefield_1(self):
f = FileField()
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("", "")
self.assertEqual("files/test1.pdf", f.clean("", "files/test1.pdf"))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None, "")
self.assertEqual("files/test2.pdf", f.clean(None, "files/test2.pdf"))
no_file_msg = "'No file was submitted. Check the encoding type on the form.'"
file = SimpleUploadedFile(None, b"")
file._name = ""
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean(file)
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean(file, "")
self.assertEqual("files/test3.pdf", f.clean(None, "files/test3.pdf"))
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean("some content that is not a file")
with self.assertRaisesMessage(
ValidationError, "'The submitted file is empty.'"
):
f.clean(SimpleUploadedFile("name", None))
with self.assertRaisesMessage(
ValidationError, "'The submitted file is empty.'"
):
f.clean(SimpleUploadedFile("name", b""))
self.assertEqual(
SimpleUploadedFile,
type(f.clean(SimpleUploadedFile("name", b"Some File Content"))),
)
self.assertIsInstance(
f.clean(
SimpleUploadedFile(
"我隻氣墊船裝滿晒鱔.txt",
"मेरी मँडराने वाली नाव सर्पमीनों से भरी ह".encode(),
)
),
SimpleUploadedFile,
)
self.assertIsInstance(
f.clean(
SimpleUploadedFile("name", b"Some File Content"), "files/test4.pdf"
),
SimpleUploadedFile,
)
def test_filefield_2(self):
f = FileField(max_length=5)
with self.assertRaisesMessage(
ValidationError,
"'Ensure this filename has at most 5 characters (it has 18).'",
):
f.clean(SimpleUploadedFile("test_maxlength.txt", b"hello world"))
self.assertEqual("files/test1.pdf", f.clean("", "files/test1.pdf"))
self.assertEqual("files/test2.pdf", f.clean(None, "files/test2.pdf"))
self.assertIsInstance(
f.clean(SimpleUploadedFile("name", b"Some File Content")),
SimpleUploadedFile,
)
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertIsInstance(
f.clean(SimpleUploadedFile("name", b"")), SimpleUploadedFile
)
def test_filefield_changed(self):
"""
The value of data will more than likely come from request.FILES. The
value of initial data will likely be a filename stored in the database.
Since its value is of no use to a FileField it is ignored.
"""
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed("", None))
# A file was uploaded and no initial data.
self.assertTrue(
f.has_changed("", {"filename": "resume.txt", "content": "My resume"})
)
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed("resume.txt", None))
# A file was uploaded and there is initial data (file identity is not
# dealt with here)
self.assertTrue(
f.has_changed(
"resume.txt", {"filename": "resume.txt", "content": "My resume"}
)
)
def test_disabled_has_changed(self):
f = FileField(disabled=True)
self.assertIs(f.has_changed("x", "y"), False)
def test_file_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(FileField())), FileField)
| FileFieldTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1077,
"end": 1106
} | class ____[T = P1]: ...
| ClassT9 |
python | pytorch__pytorch | torch/sparse/semi_structured.py | {
"start": 22196,
"end": 28722
} | class ____(SparseSemiStructuredTensor):
"""
The cuSPARSELt backend expects the specified elements and the metadata to be stored in a single tensor:
packed = [ specified elements of original tensor | metadata ]
For an original tensor of size (m, k) we expect the first m * k // 2 elements to be the kept elements
The rest of the tensor is metadata. Since there is only one tensor, we only use the packed and packed_t
attributes respectively.
cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well
as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes.
"""
BACKEND = "cusparselt"
_DTYPE_SHAPE_CONSTRAINTS = {
torch.float8_e4m3fn: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16),
torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16),
torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8),
torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8),
}
@classmethod
def from_dense(
cls, original_tensor: torch.Tensor
) -> "SparseSemiStructuredTensorCUSPARSELT":
cls._validate_device_dim_dtype_shape(original_tensor)
# pyrefly: ignore [no-matching-overload]
return cls(
shape=original_tensor.shape,
packed=torch._cslt_compress(original_tensor),
meta=None,
packed_t=None,
meta_t=None,
compressed_swizzled_bitmask=None,
fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE,
alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID,
requires_grad=original_tensor.requires_grad,
)
@classmethod
def prune_dense_static_sort(
cls, original_tensor: torch.Tensor, algorithm=""
) -> "SparseSemiStructuredTensor":
"""
This function does the same thing as described in SparseSemiStructuredCUTLASS, but uses the cuSPARSELt metadata
layout and sparse matmul.
The only functional difference is that cuSPARSELt stores `metadata` and `packed` together into a single tensor.
[9 1 7 4] [9 0 7 0]
[1 2 3 0] [0 2 0 0]
[8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed
[1 2 6 2] [0 0 6 2]
-> pack to transposed cuSPARSELt -> packed_t
semi-structured representation
-> compute swizzled bitmask -> compressed_swizzled_bitmask
The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below:
```
from torch.sparse import SparseSemiStructuredTensorCUSPARSELT
from torch.sparse._semi_structured_conversions import (
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
pruned = _sparse_semi_structured_tile(dense)
packed_cusparselt = torch._cslt_compress(pruned)
packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous())
bitmask = _compute_compressed_swizzled_bitmask(pruned)
SparseSemiStructuredTensorCUSPARSELT(
dense.shape, packed_cutlass, None, packed_t_cutlass, None, bitmask
)
```
"""
(
packed,
meta,
packed_t,
meta_t,
compressed_swizzled_bitmask,
) = torch._sparse_semi_structured_tile(
original_tensor, algorithm=algorithm, use_cutlass=False
)
# Map this two 2-dim view of packed data.
# TODO: is this proper cuSPARSELt metadata?
packed = packed.view(original_tensor.shape[0], -1)
packed_t = packed_t.view(original_tensor.shape[1], -1)
# pyrefly: ignore [no-matching-overload]
return cls(
original_tensor.shape,
packed=packed,
meta=meta,
packed_t=packed_t,
meta_t=meta_t,
compressed_swizzled_bitmask=compressed_swizzled_bitmask,
requires_grad=False,
)
def _mm(
self, B: torch.Tensor, *, bias: torch.Tensor | None = None, **kwargs
) -> torch.Tensor:
if isinstance(B, SparseSemiStructuredTensor):
raise ValueError(
"`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware"
)
if self.ndim != 2 or B.ndim != 2:
raise NotImplementedError(
f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented"
)
if B.dtype != self.dtype:
raise NotImplementedError(
f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, "
f"with A.dtype={self.dtype} and B.dtype={B.dtype}. "
"This operation is only supported when A and B have the same data type."
)
if bias is not None and bias.dtype != self.dtype:
raise NotImplementedError(
f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)} + C`, "
f"with A.dtype=B.dtype={self.dtype} and C.dtype={B.dtype}. "
"This operation is only supported when A, B and C have the same data type."
)
# Force fp8 mm to error to be consistent with torch
if self.dtype == torch.float8_e4m3fn:
raise NotImplementedError(
f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, "
f"with A.dtype=B.dtype={self.dtype}. "
"mm is not supported for float8_e4m3fn, please use `torch._scaled_mm` instead."
)
if self.packed is None:
raise NotImplementedError(
f"`{self.__class__.__name__}` matmul: operation is not supported"
)
else:
res = torch._cslt_sparse_mm(
self.packed,
B,
bias=bias,
transpose_result=self.fuse_transpose_cusparselt,
alg_id=self.alg_id_cusparselt,
)
return res.t() if self.fuse_transpose_cusparselt else res
| SparseSemiStructuredTensorCUSPARSELT |
python | great-expectations__great_expectations | great_expectations/types/colors.py | {
"start": 2911,
"end": 4140
} | class ____(Enum):
CATEGORY_5 = [
PrimaryColors.ORANGE,
SecondaryColors.ROYAL_BLUE,
SecondaryColors.TURQUOISE_BLUE,
SecondaryColors.LEAF_GREEN,
SecondaryColors.LAVENDER_PURPLE,
]
CATEGORY_7 = [
PrimaryColors.ORANGE,
SecondaryColors.ROYAL_BLUE,
SecondaryColors.TURQUOISE_BLUE,
SecondaryColors.LEAF_GREEN,
SecondaryColors.GOLD_YELLOW,
SecondaryColors.POMEGRANATE_PINK,
SecondaryColors.LAVENDER_PURPLE,
]
DIVERGING_7 = [
TintsAndShades.GREEN_50,
TintsAndShades.GREEN_30,
TintsAndShades.GREEN_10,
"#f1f1f1",
TintsAndShades.PINK_10,
TintsAndShades.PINK_30,
TintsAndShades.PINK_50,
]
HEATMAP_6 = [
TintsAndShades.TURQUOISE_BLUE_10,
TintsAndShades.TURQUOISE_BLUE_30,
TintsAndShades.TURQUOISE_BLUE_50,
TintsAndShades.MIDNIGHT_BLUE_50,
TintsAndShades.MIDNIGHT_BLUE_70,
TintsAndShades.MIDNIGHT_BLUE_90,
]
ORDINAL_5 = [
TintsAndShades.ORANGE_90,
TintsAndShades.ORANGE_70,
TintsAndShades.ORANGE_50,
TintsAndShades.YELLOW_50,
TintsAndShades.YELLOW_30,
]
| ColorPalettes |
python | numba__numba | numba/core/typing/cmathdecl.py | {
"start": 564,
"end": 728
} | class ____(ConcreteTemplate):
cases = [signature(tp, tp) for tp in sorted(types.complex_domain)]
@infer_global(cmath.isinf)
@infer_global(cmath.isnan)
| CMath_unary |
python | kamyu104__LeetCode-Solutions | Python/random-flip-matrix.py | {
"start": 118,
"end": 845
} | class ____(object):
def __init__(self, n_rows, n_cols):
"""
:type n_rows: int
:type n_cols: int
"""
self.__n_rows = n_rows
self.__n_cols = n_cols
self.__n = n_rows*n_cols
self.__lookup = {}
def flip(self):
"""
:rtype: List[int]
"""
self.__n -= 1
target = random.randint(0, self.__n)
x = self.__lookup.get(target, target)
self.__lookup[target] = self.__lookup.get(self.__n, self.__n)
return divmod(x, self.__n_cols)
def reset(self):
"""
:rtype: void
"""
self.__n = self.__n_rows*self.__n_cols
self.__lookup = {}
| Solution |
python | huggingface__transformers | src/transformers/models/gemma3n/processing_gemma3n.py | {
"start": 1084,
"end": 7134
} | class ____(ProcessorMixin):
"""
A processor for Gemma 3n, wrapping the full capabilities of a feature extractor, image processor, and tokenizer
into a single processor.
Args:
feature_extractor (`Gemma3nAudioFeatureExtractor`):
Feature extractor that converts raw audio waveforms into MEL spectrograms for the audio encoder. This
should return a `BatchFeature` with `input_features` and `input_features_mask` features.
image_processor (`SiglipImageProcessorFast`):
Image processor that prepares batches of images for the vision encoder. This should return a `BatchFeature`
with a `pixel_values` feature.
tokenizer (`GemmaTokenizerFast`):
The text tokenizer for the model.
chat_template (`string`, *optional*):
A Jinja template for generating text prompts from a set of messages.
audio_seq_length (int, *optional*, defaults to 188):
The number of audio soft tokens that will be added to the text prompt
image_seq_length (int, *optional*, defaults to 256):
The number of image soft tokens that should be added to
"""
def __init__(
self,
feature_extractor,
image_processor,
tokenizer,
chat_template=None,
audio_seq_length: int = 188,
image_seq_length: int = 256,
**kwargs,
):
self.audio_seq_length = audio_seq_length
self.audio_token_id = tokenizer.audio_token_id
self.boa_token = tokenizer.boa_token
self.audio_token = tokenizer.audio_token
audio_tokens_expanded = "".join([tokenizer.audio_token] * audio_seq_length)
self.full_audio_sequence = f"\n\n{tokenizer.boa_token}{audio_tokens_expanded}{tokenizer.eoa_token}\n\n"
self.image_seq_length = image_seq_length
self.image_token_id = tokenizer.image_token_id
self.boi_token = tokenizer.boi_token
self.image_token = tokenizer.image_token
image_tokens_expanded = "".join([tokenizer.image_token] * image_seq_length)
self.full_image_sequence = f"\n\n{tokenizer.boi_token}{image_tokens_expanded}{tokenizer.eoi_token}\n\n"
super().__init__(
feature_extractor=feature_extractor,
image_processor=image_processor,
tokenizer=tokenizer,
chat_template=chat_template,
**kwargs,
)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
audio: Optional[Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]]] = None,
**kwargs: Unpack[Gemma3nProcessorKwargs],
) -> BatchFeature:
if text is None and images is None and audio is None:
raise ValueError("Provide at least one of `text`, `images`, or `audio`.")
output_kwargs = self._merge_kwargs(
Gemma3nProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
if audio is not None:
audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
if not text:
text = [self.audio_token for _ in audio]
# Expand placeholder audio tokens to the full audio token sequence
text = [prompt.replace(self.audio_token, self.full_audio_sequence) for prompt in text]
else:
audio_inputs = {}
if images is not None:
images = self.image_processor.fetch_images(images)
batched_images = make_nested_list_of_images(images)
image_inputs = self.image_processor(batched_images, **output_kwargs["images_kwargs"])
# Create empty text to be replaced with placeholders
if not text:
text = [" ".join([self.image_token] * len(images)) for images in batched_images]
if len(batched_images) != len(text):
raise ValueError(
f"Received inconsistently sized batches of images ({len(batched_images)}) and text ({len(text)})."
)
# Expand placeholder image tokens to the full image token sequence
text = [prompt.replace(self.image_token, self.full_image_sequence) for prompt in text]
else:
image_inputs = {}
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
text_inputs = self.tokenizer(text=text, **output_kwargs["text_kwargs"], return_tensors="np")
self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
# Add token type ids manually, as tokenizer can't do arbitrary position token types
array_ids = text_inputs["input_ids"]
token_type_ids = np.zeros_like(array_ids)
token_type_ids[array_ids == self.image_token_id] = 1
token_type_ids[array_ids == self.audio_token_id] = 3
text_inputs = {k: v.tolist() for k, v in text_inputs.items()} # in case user requested list inputs
text_inputs["token_type_ids"] = token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **audio_inputs}, tensor_type=return_tensors)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names + ["token_type_ids"]
image_processor_input_names = self.image_processor.model_input_names
audio_processor_input_names = self.feature_extractor.model_input_names
image_processor_input_names = [name for name in image_processor_input_names if name != "num_crops"]
return list(tokenizer_input_names + image_processor_input_names + audio_processor_input_names)
__all__ = ["Gemma3nProcessor"]
| Gemma3nProcessor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_worksheet01.py | {
"start": 345,
"end": 1537
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData/>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | doocs__leetcode | solution/0500-0599/0529.Minesweeper/Solution.py | {
"start": 0,
"end": 854
} | class ____:
def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
def dfs(i: int, j: int):
cnt = 0
for x in range(i - 1, i + 2):
for y in range(j - 1, j + 2):
if 0 <= x < m and 0 <= y < n and board[x][y] == "M":
cnt += 1
if cnt:
board[i][j] = str(cnt)
else:
board[i][j] = "B"
for x in range(i - 1, i + 2):
for y in range(j - 1, j + 2):
if 0 <= x < m and 0 <= y < n and board[x][y] == "E":
dfs(x, y)
m, n = len(board), len(board[0])
i, j = click
if board[i][j] == "M":
board[i][j] = "X"
else:
dfs(i, j)
return board
| Solution |
python | html5lib__html5lib-python | html5lib/tests/test_stream.py | {
"start": 2101,
"end": 10561
} | class ____(HTMLBinaryInputStream):
_defaultChunkSize = 2
def test_char_ascii():
stream = HTMLInputStream(b"'", override_encoding='ascii')
assert stream.charEncoding[0].name == 'windows-1252'
assert stream.char() == "'"
def test_char_utf8():
stream = HTMLInputStream('\u2018'.encode('utf-8'), override_encoding='utf-8')
assert stream.charEncoding[0].name == 'utf-8'
assert stream.char() == '\u2018'
def test_char_win1252():
stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
assert stream.charEncoding[0].name == 'windows-1252'
assert stream.char() == "\xa9"
assert stream.char() == "\xf1"
assert stream.char() == "\u2019"
def test_bom():
stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
assert stream.charEncoding[0].name == 'utf-8'
assert stream.char() == "'"
def test_utf_16():
stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
assert stream.charEncoding[0].name in ['utf-16le', 'utf-16be']
assert len(stream.charsUntil(' ', True)) == 1025
def test_newlines():
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
assert stream.position() == (1, 0)
assert stream.charsUntil('c') == "a\nbb\n"
assert stream.position() == (3, 0)
assert stream.charsUntil('x') == "ccc\ndddd"
assert stream.position() == (4, 4)
assert stream.charsUntil('e') == "x"
assert stream.position() == (4, 5)
def test_newlines2():
size = HTMLUnicodeInputStream._defaultChunkSize
stream = HTMLInputStream("\r" * size + "\n")
assert stream.charsUntil('x') == "\n" * size
def test_position():
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
assert stream.position() == (1, 0)
assert stream.charsUntil('c') == "a\nbb\n"
assert stream.position() == (3, 0)
stream.unget("\n")
assert stream.position() == (2, 2)
assert stream.charsUntil('c') == "\n"
assert stream.position() == (3, 0)
stream.unget("\n")
assert stream.position() == (2, 2)
assert stream.char() == "\n"
assert stream.position() == (3, 0)
assert stream.charsUntil('e') == "ccc\nddd"
assert stream.position() == (4, 3)
assert stream.charsUntil('h') == "e\nf\ng"
assert stream.position() == (6, 1)
def test_position2():
stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
assert stream.position() == (1, 0)
assert stream.char() == "a"
assert stream.position() == (1, 1)
assert stream.char() == "b"
assert stream.position() == (1, 2)
assert stream.char() == "c"
assert stream.position() == (1, 3)
assert stream.char() == "\n"
assert stream.position() == (2, 0)
assert stream.char() == "d"
assert stream.position() == (2, 1)
def test_python_issue_20007():
"""
Make sure we have a work-around for Python bug #20007
http://bugs.python.org/issue20007
"""
class FakeSocket(object):
def makefile(self, _mode, _bufsize=None):
# pylint:disable=unused-argument
return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
source = http_client.HTTPResponse(FakeSocket())
source.begin()
stream = HTMLInputStream(source)
assert stream.charsUntil(" ") == "Text"
def test_python_issue_20007_b():
"""
Make sure we have a work-around for Python bug #20007
http://bugs.python.org/issue20007
"""
if six.PY2:
return
class FakeSocket(object):
def makefile(self, _mode, _bufsize=None):
# pylint:disable=unused-argument
return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
source = http_client.HTTPResponse(FakeSocket())
source.begin()
wrapped = urllib.response.addinfourl(source, source.msg, "http://example.com")
stream = HTMLInputStream(wrapped)
assert stream.charsUntil(" ") == "Text"
@pytest.mark.parametrize("inp,num",
[("\u0000", 0),
("\u0001", 1),
("\u0008", 1),
("\u0009", 0),
("\u000A", 0),
("\u000B", 1),
("\u000C", 0),
("\u000D", 0),
("\u000E", 1),
("\u001F", 1),
("\u0020", 0),
("\u007E", 0),
("\u007F", 1),
("\u009F", 1),
("\u00A0", 0),
("\uFDCF", 0),
("\uFDD0", 1),
("\uFDEF", 1),
("\uFDF0", 0),
("\uFFFD", 0),
("\uFFFE", 1),
("\uFFFF", 1),
("\U0001FFFD", 0),
("\U0001FFFE", 1),
("\U0001FFFF", 1),
("\U0002FFFD", 0),
("\U0002FFFE", 1),
("\U0002FFFF", 1),
("\U0003FFFD", 0),
("\U0003FFFE", 1),
("\U0003FFFF", 1),
("\U0004FFFD", 0),
("\U0004FFFE", 1),
("\U0004FFFF", 1),
("\U0005FFFD", 0),
("\U0005FFFE", 1),
("\U0005FFFF", 1),
("\U0006FFFD", 0),
("\U0006FFFE", 1),
("\U0006FFFF", 1),
("\U0007FFFD", 0),
("\U0007FFFE", 1),
("\U0007FFFF", 1),
("\U0008FFFD", 0),
("\U0008FFFE", 1),
("\U0008FFFF", 1),
("\U0009FFFD", 0),
("\U0009FFFE", 1),
("\U0009FFFF", 1),
("\U000AFFFD", 0),
("\U000AFFFE", 1),
("\U000AFFFF", 1),
("\U000BFFFD", 0),
("\U000BFFFE", 1),
("\U000BFFFF", 1),
("\U000CFFFD", 0),
("\U000CFFFE", 1),
("\U000CFFFF", 1),
("\U000DFFFD", 0),
("\U000DFFFE", 1),
("\U000DFFFF", 1),
("\U000EFFFD", 0),
("\U000EFFFE", 1),
("\U000EFFFF", 1),
("\U000FFFFD", 0),
("\U000FFFFE", 1),
("\U000FFFFF", 1),
("\U0010FFFD", 0),
("\U0010FFFE", 1),
("\U0010FFFF", 1),
("\x01\x01\x01", 3),
("a\x01a\x01a\x01a", 3)])
def test_invalid_codepoints(inp, num):
stream = HTMLUnicodeInputStream(StringIO(inp))
for _i in range(len(inp)):
stream.char()
assert len(stream.errors) == num
@pytest.mark.skipif(not supports_lone_surrogates, reason="doesn't support lone surrogates")
@pytest.mark.parametrize("inp,num",
[("'\\uD7FF'", 0),
("'\\uD800'", 1),
("'\\uDBFF'", 1),
("'\\uDC00'", 1),
("'\\uDFFF'", 1),
("'\\uE000'", 0),
("'\\uD800\\uD800\\uD800'", 3),
("'a\\uD800a\\uD800a\\uD800a'", 3),
("'\\uDFFF\\uDBFF'", 2),
pytest.param(
"'\\uDBFF\\uDFFF'", 2,
marks=pytest.mark.skipif(
sys.maxunicode == 0xFFFF,
reason="narrow Python"))])
def test_invalid_codepoints_surrogates(inp, num):
inp = eval(inp) # pylint:disable=eval-used
fp = StringIO(inp)
if ord(max(fp.read())) > 0xFFFF:
pytest.skip("StringIO altered string")
fp.seek(0)
stream = HTMLUnicodeInputStream(fp)
for _i in range(len(inp)):
stream.char()
assert len(stream.errors) == num
| HTMLBinaryInputStreamShortChunk |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/gen_ai.py | {
"start": 1472,
"end": 7983
} | class ____(GoogleBaseHook):
"""Class for Google Cloud Generative AI Vertex AI hook."""
def get_genai_client(self, project_id: str, location: str):
return genai.Client(
vertexai=True,
project=project_id,
location=location,
)
@GoogleBaseHook.fallback_to_default_project_id
def embed_content(
self,
model: str,
location: str,
contents: ContentListUnion | ContentListUnionDict | list[str],
config: EmbedContentConfigOrDict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> EmbedContentResponse:
"""
Generate embeddings for words, phrases, sentences, and code.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param model: Required. The model to use.
:param contents: Optional. The contents to use for embedding.
:param config: Optional. Configuration for embeddings.
"""
client = self.get_genai_client(project_id=project_id, location=location)
resp = client.models.embed_content(model=model, contents=contents, config=config)
return resp
@GoogleBaseHook.fallback_to_default_project_id
def generate_content(
self,
location: str,
model: str,
contents: ContentListUnionDict,
generation_config: GenerateContentConfig | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> str:
"""
Make an API request to generate content using a model.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param model: Required. The model to use.
:param contents: Required. The multi-part content of a message that a user or a program
gives to the generative model, in order to elicit a specific response.
:param generation_config: Optional. Generation configuration settings.
"""
client = self.get_genai_client(project_id=project_id, location=location)
response = client.models.generate_content(
model=model,
contents=contents,
config=generation_config,
)
return response.text
@GoogleBaseHook.fallback_to_default_project_id
def supervised_fine_tuning_train(
self,
source_model: str,
location: str,
training_dataset: TuningDatasetOrDict,
tuning_job_config: CreateTuningJobConfigOrDict | dict[str, Any] | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> TuningJob:
"""
Create a tuning job to adapt model behavior with a labeled dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param source_model: Required. A pre-trained model optimized for performing natural
language tasks such as classification, summarization, extraction, content
creation, and ideation.
:param train_dataset: Required. Cloud Storage URI of your training dataset. The dataset
must be formatted as a JSONL file. For best results, provide at least 100 to 500 examples.
:param tuning_job_config: Optional. Configuration of the Tuning job to be created.
"""
client = self.get_genai_client(project_id=project_id, location=location)
tuning_job = client.tunings.tune(
base_model=source_model,
training_dataset=training_dataset,
config=tuning_job_config,
)
# Poll until completion
running = {"JOB_STATE_PENDING", "JOB_STATE_RUNNING"}
while tuning_job.state in running:
time.sleep(60)
tuning_job = client.tunings.get(name=tuning_job.name)
return tuning_job
@GoogleBaseHook.fallback_to_default_project_id
def count_tokens(
self,
location: str,
model: str,
contents: ContentListUnion | ContentListUnionDict,
config: CountTokensConfigOrDict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> CountTokensResponse:
"""
Use Count Tokens API to calculate the number of input tokens before sending a request to Gemini API.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param contents: Required. The multi-part content of a message that a user or a program
gives to the generative model, in order to elicit a specific response.
:param model: Required. Model,
supporting prompts with text-only input, including natural language
tasks, multi-turn text and code chat, and code generation. It can
output text and code.
:param config: Optional. Configuration for Count Tokens.
"""
client = self.get_genai_client(project_id=project_id, location=location)
response = client.models.count_tokens(
model=model,
contents=contents,
config=config,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def create_cached_content(
self,
model: str,
location: str,
cached_content_config: CreateCachedContentConfigOrDict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> str:
"""
Create CachedContent to reduce the cost of requests containing repeat content.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param model: Required. The name of the publisher model to use for cached content.
:param cached_content_config: Optional. Configuration of the Cached Content.
"""
client = self.get_genai_client(project_id=project_id, location=location)
resp = client.caches.create(
model=model,
config=cached_content_config,
)
return resp.name
| GenAIGenerativeModelHook |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-google/llama_index/readers/google/maps/base.py | {
"start": 745,
"end": 889
} | class ____(BaseModel):
reviews: List[Review]
address: str
average_rating: float
display_name: str
number_of_ratings: int
| Place |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/floating_axes.py | {
"start": 659,
"end": 3829
} | class ____(grid_helper_curvelinear.FloatingAxisArtistHelper):
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
lon1, lon2, lat1, lat2 = grid_helper.grid_finder.extreme_finder(*[None] * 5)
value, nth_coord = _api.check_getitem(
dict(left=(lon1, 0), right=(lon2, 0), bottom=(lat1, 1), top=(lat2, 1)),
side=side)
super().__init__(grid_helper, nth_coord, value, axis_direction=side)
if nth_coord_ticks is None:
nth_coord_ticks = nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.value = value
self.grid_helper = grid_helper
self._side = side
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
self._grid_info = self.grid_helper._grid_info
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self._grid_info["lat_info"]
yy0 = lat_levs / lat_factor
lon_levs, lon_n, lon_factor = self._grid_info["lon_info"]
xx0 = lon_levs / lon_factor
extremes = self.grid_helper.grid_finder.extreme_finder(*[None] * 5)
xmin, xmax = sorted(extremes[:2])
ymin, ymax = sorted(extremes[2:])
def trf_xy(x, y):
trf = grid_finder.get_transform() + axes.transData
return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T
if self.nth_coord == 0:
mask = (ymin <= yy0) & (yy0 <= ymax)
(xx1, yy1), angle_normal, angle_tangent = \
grid_helper_curvelinear._value_and_jac_angle(
trf_xy, self.value, yy0[mask], (xmin, xmax), (ymin, ymax))
labels = self._grid_info["lat_labels"]
elif self.nth_coord == 1:
mask = (xmin <= xx0) & (xx0 <= xmax)
(xx1, yy1), angle_tangent, angle_normal = \
grid_helper_curvelinear._value_and_jac_angle(
trf_xy, xx0[mask], self.value, (xmin, xmax), (ymin, ymax))
labels = self._grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
tick_to_axes = self.get_tick_transform(axes) - axes.transAxes
in_01 = functools.partial(
mpl.transforms._interval_contains_close, (0, 1))
def f1():
for x, y, normal, tangent, lab \
in zip(xx1, yy1, angle_normal, angle_tangent, labels):
c2 = tick_to_axes.transform((x, y))
if in_01(c2[0]) and in_01(c2[1]):
yield [x, y], *np.rad2deg([normal, tangent]), lab
return f1(), iter([])
def get_line(self, axes):
self.update_lim(axes)
k, v = dict(left=("lon_lines0", 0),
right=("lon_lines0", 1),
bottom=("lat_lines0", 0),
top=("lat_lines0", 1))[self._side]
return Path(self._grid_info[k][v])
| FixedAxisArtistHelper |
python | scrapy__scrapy | tests/test_downloader_handler_twisted_http11.py | {
"start": 1112,
"end": 1209
} | class ____(HTTP11DownloadHandlerMixin, TestHttpsInvalidDNSIdBase):
pass
| TestHttps11InvalidDNSId |
python | kamyu104__LeetCode-Solutions | Python/shift-distance-between-two-strings.py | {
"start": 48,
"end": 1030
} | class ____(object):
def shiftDistance(self, s, t, nextCost, previousCost):
"""
:type s: str
:type t: str
:type nextCost: List[int]
:type previousCost: List[int]
:rtype: int
"""
prefix1 = [0]*(len(nextCost)+1)
for i in xrange(len(nextCost)):
prefix1[i+1] = prefix1[i]+nextCost[i]
prefix2 = [0]*(len(previousCost)+1)
for i in xrange(len(previousCost)):
prefix2[i+1] = prefix2[i]+previousCost[i]
result = 0
for i in xrange(len(s)):
if s[i] == t[i]:
continue
left = ord(s[i])-ord('a')
right = ord(t[i])-ord('a')
if left <= right:
result += min(prefix1[right]-prefix1[left], prefix2[-1]-(prefix2[right+1]-prefix2[left+1]))
else:
result += min(prefix2[left+1]-prefix2[right+1], prefix1[-1]-(prefix1[left]-prefix1[right]))
return result
| Solution |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/functional.py | {
"start": 56661,
"end": 58445
} | class ____(base_layer.Layer):
"""Wrapper for `tf.Module`s to support the Functional and Sequential API."""
def __init__(self, module, method_name=None, **kwargs):
"""Initializes the wrapper Layer for this module.
Args:
module: The `tf.Module` instance to be wrapped.
method_name: (Optional) str. The name of the method to use as the forward
pass of the module. If not set, defaults to '__call__' if defined, or
'call'.
**kwargs: Additional keywrod arguments. See `tf.keras.layers.Layer`.
Raises:
ValueError: If `method` is not defined on `module`.
"""
super(ModuleWrapper, self).__init__(**kwargs)
if method_name is None:
if hasattr(module, '__call__'):
method_name = '__call__'
elif hasattr(module, 'call'):
method_name = 'call'
if method_name is None or not hasattr(module, method_name):
raise ValueError('{} is not defined on object {}'.format(
method_name, module))
self._module = module
self._method_name = method_name
# Check if module.__call__ has a `training` arg or accepts `**kwargs`.
method = getattr(module, method_name)
method_arg_spec = tf_inspect.getfullargspec(method)
self._expects_training_arg = ('training' in method_arg_spec.args or
method_arg_spec.varkw is not None)
self._expects_mask_arg = ('mask' in method_arg_spec.args or
method_arg_spec.varkw is not None)
def call(self, *args, **kwargs):
if 'training' in kwargs and not self._expects_training_arg:
kwargs.pop('training')
if 'mask' in kwargs and not self._expects_mask_arg:
kwargs.pop('mask')
return getattr(self._module, self._method_name)(*args, **kwargs)
| ModuleWrapper |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/sensor_definition.py | {
"start": 50594,
"end": 62463
} | class ____(IHaveNew):
run_requests: Optional[Sequence[RunRequest]]
skip_message: Optional[str]
cursor: Optional[str]
dagster_run_reactions: Optional[Sequence[DagsterRunReaction]]
log_key: Optional[Sequence[str]]
dynamic_partitions_requests: Optional[
Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]
]
asset_events: Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]
automation_condition_evaluations: Sequence[AutomationConditionEvaluation]
def __new__(
cls,
run_requests: Optional[Sequence[RunRequest]] = None,
skip_message: Optional[str] = None,
cursor: Optional[str] = None,
dagster_run_reactions: Optional[Sequence[DagsterRunReaction]] = None,
log_key: Optional[Sequence[str]] = None,
dynamic_partitions_requests: Optional[
Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]
] = None,
asset_events: Optional[
Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]
] = None,
automation_condition_evaluations: Optional[Sequence[AutomationConditionEvaluation]] = None,
):
check.invariant(
not (run_requests and skip_message), "Found both skip data and run request data"
)
return super().__new__(
cls,
run_requests=run_requests,
skip_message=skip_message,
cursor=cursor,
dagster_run_reactions=dagster_run_reactions,
log_key=log_key,
dynamic_partitions_requests=dynamic_partitions_requests,
asset_events=asset_events or [],
automation_condition_evaluations=automation_condition_evaluations or [],
)
def wrap_sensor_evaluation(
sensor_name: str,
fn: RawSensorEvaluationFunction,
) -> SensorEvaluationFunction:
resource_arg_names: set[str] = {arg.name for arg in get_resource_args(fn)}
def _wrapped_fn(context: SensorEvaluationContext):
resource_args_populated = validate_and_get_resource_dict(
context.resources, sensor_name, resource_arg_names
)
context_param_name_if_present = get_context_param_name(fn)
context_param = (
{context_param_name_if_present: context} if context_param_name_if_present else {}
)
raw_evaluation_result = fn(**context_param, **resource_args_populated)
def check_returned_scalar(scalar):
if isinstance(scalar, (SkipReason, RunRequest, SensorResult)):
return scalar
elif scalar is not None:
raise Exception(
f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "
f"{scalar} of type {type(scalar)}. Should only return SkipReason or "
"RunRequest objects."
)
if inspect.isgenerator(raw_evaluation_result):
result = []
try:
while True:
result.append(next(raw_evaluation_result))
except StopIteration as e:
# captures the case where the evaluation function has a yield and also returns a
# value
if e.value is not None:
result.append(check_returned_scalar(e.value))
return result
elif isinstance(raw_evaluation_result, list):
return raw_evaluation_result
else:
return [check_returned_scalar(raw_evaluation_result)]
return _wrapped_fn
@public
def build_sensor_context(
instance: Optional[DagsterInstance] = None,
cursor: Optional[str] = None,
repository_name: Optional[str] = None,
repository_def: Optional["RepositoryDefinition"] = None,
sensor_name: Optional[str] = None,
resources: Optional[Mapping[str, object]] = None,
definitions: Optional["Definitions"] = None,
instance_ref: Optional["InstanceRef"] = None,
last_sensor_start_time: Optional[float] = None,
) -> SensorEvaluationContext:
"""Builds sensor execution context using the provided parameters.
This function can be used to provide a context to the invocation of a sensor definition.If
provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an
error.
Args:
instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.
cursor (Optional[str]): A cursor value to provide to the evaluation of the sensor.
repository_name (Optional[str]): The name of the repository that the sensor belongs to.
repository_def (Optional[RepositoryDefinition]): The repository that the sensor belongs to.
If needed by the sensor top-level resource definitions will be pulled from this repository.
You can provide either this or `definitions`.
resources (Optional[Mapping[str, ResourceDefinition]]): A set of resource definitions
to provide to the sensor. If passed, these will override any resource definitions
provided by the repository.
definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.
If needed by the sensor, top-level resource definitions will be pulled from these
definitions. You can provide either this or `repository_def`.
last_sensor_start_time (Optional[float]): The last time the sensor was started.
Examples:
.. code-block:: python
context = build_sensor_context()
my_sensor(context)
"""
from dagster._core.definitions.definitions_class import Definitions
from dagster._core.definitions.repository_definition import RepositoryDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution
check.opt_inst_param(instance, "instance", DagsterInstance)
check.opt_str_param(cursor, "cursor")
check.opt_str_param(repository_name, "repository_name")
repository_def = normalize_to_repository(
check.opt_inst_param(definitions, "definitions", Definitions),
check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),
error_on_none=False,
)
return SensorEvaluationContext(
instance_ref=instance_ref,
last_tick_completion_time=None,
last_run_key=None,
cursor=cursor,
log_key=None,
repository_name=repository_name,
instance=instance,
repository_def=repository_def,
sensor_name=sensor_name,
resources=wrap_resources_for_execution(resources),
last_sensor_start_time=last_sensor_start_time,
)
T = TypeVar("T")
def get_sensor_context_from_args_or_kwargs(
fn: Callable[..., Any],
args: tuple[Any, ...],
kwargs: dict[str, Any],
context_type: type[T],
) -> Optional[T]:
from dagster._config.pythonic_config import is_coercible_to_resource
context_param_name = get_context_param_name(fn)
kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}
if len(args) + len(kwarg_keys_non_resource) > 1:
raise DagsterInvalidInvocationError(
"Sensor invocation received multiple non-resource arguments. Only a first "
"positional context parameter should be provided when invoking."
)
if any(is_coercible_to_resource(arg) for arg in args):
raise DagsterInvalidInvocationError(
"If directly invoking a sensor, you may not provide resources as"
" positional"
" arguments, only as keyword arguments."
)
context: Optional[T] = None
if len(args) > 0:
context = check.opt_inst(args[0], context_type)
elif len(kwargs) > 0:
if context_param_name and context_param_name not in kwargs:
raise DagsterInvalidInvocationError(
f"Sensor invocation expected argument '{context_param_name}'."
)
context = check.opt_inst(kwargs.get(context_param_name or "context"), context_type)
elif context_param_name:
# If the context parameter is present but no value was provided, we error
raise DagsterInvalidInvocationError(
"Sensor evaluation function expected context argument, but no context argument "
"was provided when invoking."
)
return context
def get_or_create_sensor_context(
fn: Callable[..., Any],
*args: Any,
context_type: type = SensorEvaluationContext,
**kwargs: Any,
) -> SensorEvaluationContext:
"""Based on the passed resource function and the arguments passed to it, returns the
user-passed SensorEvaluationContext or creates one if it is not passed.
Raises an exception if the user passes more than one argument or if the user-provided
function requires a context parameter but none is passed.
"""
context = (
get_sensor_context_from_args_or_kwargs(fn, args, kwargs, context_type)
or build_sensor_context()
)
resource_args_from_kwargs = {}
resource_args = {param.name for param in get_resource_args(fn)}
for resource_arg in resource_args:
if resource_arg in kwargs:
resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]
if resource_args_from_kwargs:
return context.merge_resources(resource_args_from_kwargs)
return context
def _run_requests_with_base_asset_jobs(
run_requests: Iterable[RunRequest],
context: SensorEvaluationContext,
outer_asset_selection: AssetSelection,
) -> Sequence[RunRequest]:
"""For sensors that target asset selections instead of jobs, finds the corresponding base asset
for a selected set of assets.
"""
asset_graph = context.repository_def.asset_graph # type: ignore # (possible none)
result = []
for run_request in run_requests:
if run_request.asset_selection is not None:
asset_keys = run_request.asset_selection
unexpected_asset_keys = (
KeysAssetSelection(selected_keys=asset_keys) - outer_asset_selection
).resolve(asset_graph)
if unexpected_asset_keys:
raise DagsterInvalidSubsetError(
"RunRequest includes asset keys that are not part of sensor's asset_selection:"
f" {unexpected_asset_keys}"
)
else:
asset_keys = outer_asset_selection.resolve(asset_graph)
if run_request.asset_check_keys is not None:
asset_check_keys = run_request.asset_check_keys
unexpected_asset_check_keys = (
AssetCheckKeysSelection(selected_asset_check_keys=asset_check_keys)
- outer_asset_selection
).resolve_checks(asset_graph)
if unexpected_asset_check_keys:
deprecation_warning(
subject="Including asset check keys in a sensor RunRequest that are not a subset of the sensor asset_selection",
breaking_version="1.9.0",
additional_warn_text=f"Unexpected asset check keys: {unexpected_asset_check_keys}.",
)
else:
asset_check_keys = KeysAssetSelection(selected_keys=list(asset_keys)).resolve_checks(
asset_graph
)
base_job = context.repository_def.get_implicit_job_def_for_assets(asset_keys) # type: ignore # (possible none)
result.append(
run_request.with_replaced_attrs(
job_name=base_job.name, # type: ignore # (possible none)
asset_selection=list(asset_keys),
asset_check_keys=list(asset_check_keys),
)
)
return result
| SensorExecutionData |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.