language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tiangolo__fastapi | docs_src/pydantic_v1_in_v2/tutorial002_an_py310.py | {
"start": 64,
"end": 252
} | class ____(BaseModel):
name: str
description: str | None = None
size: float
app = FastAPI()
@app.post("/items/")
async def create_item(item: Item) -> Item:
return item
| Item |
python | spyder-ide__spyder | spyder/plugins/switcher/widgets/item.py | {
"start": 4479,
"end": 11219
} | class ____(SwitcherBaseItem):
"""
Switcher item with title, description, shortcut and section.
SwitcherItem: [title description <shortcut> section]
Based on HTML delegate.
See: https://doc.qt.io/qt-5/richtext-html-subset.html
"""
_FONT_SIZE = 10
_STYLE_ATTRIBUTES = ['title_color', 'description_color', 'section_color',
'shortcut_color', 'title_font_size',
'description_font_size', 'section_font_size',
'shortcut_font_size']
_TEMPLATE = u'''
<table width="{width}" max_width="{width}" height="{height}"
cellpadding="{padding}">
<tr>
<td valign="middle">
<span style="color:{title_color};font-size:{title_font_size}pt">
{title}
</span>
<em>
<span
style="color:{description_color};font-size:{description_font_size}pt">
<span>{description}</span>
</span>
</em>
</td>
<td valign="middle" align="right" float="right">
<span style="color:{shortcut_color};font-size:{shortcut_font_size}pt">
<code><i>{shortcut}</i></code>
</span>
<span style="color:{section_color};font-size:{section_font_size}pt">
{section}
</span>
</td>
</tr>
</table>'''
def __init__(self, parent=None, icon=None, title=None, description=None,
shortcut=None, section=None, data=None, tool_tip=None,
action_item=False, styles=None, score=-1, use_score=True):
"""Switcher item with title, description, shortcut and section."""
super().__init__(parent=parent, styles=styles, use_score=use_score)
self._title = title if title else ''
self._rich_title = ''
self._shortcut = shortcut if shortcut else ''
self._description = description if description else ''
self._section = section if section else ''
self._icon = icon
self._data = data
self._score = score
self._action_item = action_item
# Section visibility is computed by the setup_sections method of the
# switcher.
self._section_visible = False
# Setup
self.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
if icon:
self.setIcon(icon)
# TODO: Change fixed icon size value
self._icon_width = 20
else:
self._icon_width = 0
self._set_styles()
self._set_rendered_text()
# ---- Helpers
def _render_text(self, title=None, description=None, section=None):
"""Render the html template for this item."""
if self._rich_title:
title = self._rich_title
else:
title = title if title else self._title
# TODO: Based on width this should elide/shorten
description = description if description else self._description
if self._section_visible:
section = section if section else self._section
else:
section = ''
padding = self._PADDING
width = int(self._width - self._icon_width)
height = int(self.get_height())
self.setSizeHint(QSize(width, height))
shortcut = '<' + self._shortcut + '>' if self._shortcut else ''
title = str(title)
section = str(section)
description = str(description)
shortcut = str(shortcut)
text = self._TEMPLATE.format(width=width, height=height, title=title,
section=section, description=description,
padding=padding, shortcut=shortcut,
**self._styles)
return text
def _set_styles(self):
"""Set the styles for this item."""
for attr in self._STYLE_ATTRIBUTES:
if attr not in self._styles:
self._styles[attr] = self._STYLES[attr]
def _get_height(self):
"""
Return the expected height of this item's text, including
the text margins.
"""
doc = QTextDocument()
try:
doc.setHtml('<span style="font-size:{}pt">Title</span>'
.format(self._styles['title_font_size']))
except KeyError:
doc.setHtml('<span>Title</span>')
doc.setDocumentMargin(self._PADDING)
return doc.size().height()
# ---- API
def set_icon(self, icon):
"""Set the QIcon for the list item."""
self._icon = icon
self.setIcon(icon)
def get_icon(self):
"""Return the QIcon for the list item."""
return self._icon
def set_title(self, value):
"""Set the main text (title) of the item."""
self._title = value
self._set_rendered_text()
def get_title(self):
"""Return the the main text (title) of the item."""
return self._title
def set_rich_title(self, value):
"""Set the rich title version (filter highlight) of the item."""
self._rich_title = value
self._set_rendered_text()
def get_rich_title(self):
"""Return the rich title version (filter highlight) of the item."""
return self._rich_title
def set_description(self, value):
"""Set the item description text."""
self._description = value
self._set_rendered_text()
def get_description(self):
"""Return the item description text."""
return self._description
def set_shortcut(self, value):
"""Set the shortcut for the item action."""
self._shortcut = value
self._set_rendered_text()
def get_shortcut(self, value):
"""Return the shortcut for the item action."""
return self._shortcut
def set_tooltip(self, value):
"""Set the tooltip text for the item."""
super().setTooltip(value)
def set_data(self, value):
"""Set the additional data associated to the item."""
self._data = value
def get_data(self):
"""Return the additional data associated to the item."""
return self._data
def set_section(self, value):
"""Set the item section name."""
self._section = value
self._set_rendered_text()
def get_section(self):
"""Return the item section name."""
return self._section
def set_section_visible(self, value):
"""Set visibility of the item section."""
self._section_visible = value
self._set_rendered_text()
def set_action_item(self, value):
"""Enable/disable the action type for the item."""
self._action_item = value
self._set_rendered_text()
| SwitcherItem |
python | mlflow__mlflow | mlflow/entities/_mlflow_object.py | {
"start": 950,
"end": 1394
} | class ____:
def __init__(self):
super().__init__()
self.printer = pprint.PrettyPrinter()
def to_string(self, obj):
if isinstance(obj, _MlflowObject):
return f"<{get_classname(obj)}: {self._entity_to_string(obj)}>"
return self.printer.pformat(obj)
def _entity_to_string(self, entity):
return ", ".join([f"{key}={self.to_string(value)}" for key, value in entity])
| _MlflowObjectPrinter |
python | wandb__wandb | wandb/automations/_filters/run_states.py | {
"start": 848,
"end": 2136
} | class ____(GQLBase): # from: RunStateFilter
states: Annotated[
list[ReportedRunState],
BeforeValidator(always_list), # Coerce x -> [x] if passed a single value
]
@property
def event_type(self) -> EventType:
return EventType.RUN_STATE
@field_validator("states", mode="after")
@classmethod
def _dedup_and_order(cls, v: list[ReportedRunState]) -> list[ReportedRunState]:
"""Ensure states are deduplicated and predictably ordered."""
return sorted(set(v))
def __and__(self, other: Any) -> RunStateFilter:
"""Returns `(state_filter & run_filter)` as a `RunStateFilter`."""
from wandb.automations.events import RunStateFilter
if isinstance(run_filter := other, (BaseOp, FilterExpr)):
# Treat `other` as a run filter and build a RunStateFilter. Let the
# metric filter validators wrap or nest as appropriate.
return RunStateFilter(run=run_filter, state=self)
return NotImplemented
def __rand__(self, other: BaseOp | FilterExpr) -> RunStateFilter:
"""Ensures `&` is commutative for run and state filters.
I.e. `(run_filter & state_filter) == (state_filter & run_filter)`.
"""
return self.__and__(other)
| StateFilter |
python | getsentry__sentry | src/sentry/rules/conditions/event_frequency.py | {
"start": 2084,
"end": 2948
} | class ____(GenericCondition):
"""
The base typed dict for all condition data representing EventFrequency issue
alert rule conditions
"""
# Either the count or percentage.
value: int | float
# The interval to compare the value against such as 5m, 1h, 3w, etc.
# e.g. # of issues is more than {value} in {interval}.
interval: str
# NOTE: Some of the earliest COUNT conditions were created without the
# comparisonType field, although modern rules will always have it.
comparisonType: NotRequired[Literal[ComparisonType.COUNT, ComparisonType.PERCENT]]
# The previous interval to compare the curr interval against. This is only
# present in PERCENT conditions.
# e.g. # of issues is 50% higher in {interval} compared to {comparisonInterval}
comparisonInterval: NotRequired[str]
| EventFrequencyConditionData |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/extra/django/_impl.py | {
"start": 1433,
"end": 1505
} | class ____(HypothesisTestCase, dt.SimpleTestCase):
pass
| SimpleTestCase |
python | PyCQA__pylint | tests/functional/m/member/member_checks.py | {
"start": 4917,
"end": 5138
} | class ____:
def __init__(self, flag):
if flag:
self.attribute = slice(None)
else:
self.attribute = []
self.attribute.append(1)
from enum import Enum
| SomeClassUsingSlice |
python | django__django | tests/app_loading/not_installed/models.py | {
"start": 124,
"end": 286
} | class ____(models.Model):
class Meta:
app_label = "not_installed"
not_installed = models.ForeignKey(NotInstalledModel, models.CASCADE)
| RelatedModel |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/cli_invocation.py | {
"start": 502,
"end": 2235
} | class ____:
"""Represents a dbt Cloud cli invocation."""
args: Sequence[str]
client: DbtCloudWorkspaceClient
manifest: Mapping[str, Any]
dagster_dbt_translator: DagsterDbtTranslator
run_handler: DbtCloudJobRunHandler
context: Optional[AssetExecutionContext]
@classmethod
def run(
cls,
job_id: int,
args: Sequence[str],
client: DbtCloudWorkspaceClient,
manifest: Mapping[str, Any],
dagster_dbt_translator: DagsterDbtTranslator,
context: Optional[AssetExecutionContext] = None,
) -> "DbtCloudCliInvocation":
run_handler = DbtCloudJobRunHandler.run(
job_id=job_id,
args=args,
client=client,
)
return DbtCloudCliInvocation(
args=args,
client=client,
manifest=manifest,
dagster_dbt_translator=dagster_dbt_translator,
run_handler=run_handler,
context=context,
)
def wait(
self, timeout: Optional[float] = None
) -> Iterator[Union[AssetCheckEvaluation, AssetCheckResult, AssetMaterialization, Output]]:
run = self.run_handler.wait(timeout=timeout)
if "run_results.json" in self.run_handler.list_run_artifacts():
run_results = DbtCloudJobRunResults.from_run_results_json(
run_results_json=self.run_handler.get_run_results()
)
yield from run_results.to_default_asset_events(
client=self.client,
manifest=self.manifest,
dagster_dbt_translator=self.dagster_dbt_translator,
context=self.context,
)
run.raise_for_status()
| DbtCloudCliInvocation |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 39466,
"end": 39542
} | class ____(Projection):
r"""Base class for HEALPix projections."""
| HEALPix |
python | ray-project__ray | python/ray/train/lightgbm/lightgbm_trainer.py | {
"start": 3205,
"end": 13540
} | class ____(SimpleLightGBMTrainer):
"""A Trainer for distributed data-parallel LightGBM training.
Example
-------
.. testcode::
:skipif: True
import lightgbm
import ray.data
import ray.train
from ray.train.lightgbm import RayTrainReportCallback, LightGBMTrainer
def train_fn_per_worker(config: dict):
# (Optional) Add logic to resume training state from a checkpoint.
# ray.train.get_checkpoint()
# 1. Get the dataset shard for the worker and convert to a `lightgbm.Dataset`
train_ds_iter, eval_ds_iter = (
ray.train.get_dataset_shard("train"),
ray.train.get_dataset_shard("validation"),
)
train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()
train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]
dtrain = lightgbm.Dataset(train_X, label=train_y)
deval = lightgbm.Dataset(eval_X, label=eval_y)
params = {
"objective": "regression",
"metric": "l2",
"learning_rate": 1e-4,
"subsample": 0.5,
"max_depth": 2,
# Adding the line below is the only change needed
# for your `lgb.train` call!
**ray.train.lightgbm.get_network_params(),
}
# 2. Do distributed data-parallel training.
# Ray Train sets up the necessary coordinator processes and
# environment variables for your workers to communicate with each other.
bst = lightgbm.train(
params,
train_set=dtrain,
valid_sets=[deval],
valid_names=["validation"],
num_boost_round=10,
callbacks=[RayTrainReportCallback()],
)
train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
eval_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(16)])
trainer = LightGBMTrainer(
train_fn_per_worker,
datasets={"train": train_ds, "validation": eval_ds},
scaling_config=ray.train.ScalingConfig(num_workers=4),
)
result = trainer.fit()
booster = RayTrainReportCallback.get_model(result.checkpoint)
Args:
train_loop_per_worker: The training function to execute on each worker.
This function can either take in zero arguments or a single ``Dict``
argument which is set by defining ``train_loop_config``.
Within this function you can use any of the
:ref:`Ray Train Loop utilities <train-loop-api>`.
train_loop_config: A configuration ``Dict`` to pass in as an argument to
``train_loop_per_worker``.
This is typically used for specifying hyperparameters.
lightgbm_config: The configuration for setting up the distributed lightgbm
backend. Defaults to using the "rabit" backend.
See :class:`~ray.train.lightgbm.LightGBMConfig` for more info.
datasets: The Ray Datasets to use for training and validation.
dataset_config: The configuration for ingesting the input ``datasets``.
By default, all the Ray Datasets are split equally across workers.
See :class:`~ray.train.DataConfig` for more details.
scaling_config: The configuration for how to scale data parallel training.
``num_workers`` determines how many Python processes are used for training,
and ``use_gpu`` determines whether or not each process should use GPUs.
See :class:`~ray.train.ScalingConfig` for more info.
run_config: The configuration for the execution of the training run.
See :class:`~ray.train.RunConfig` for more info.
resume_from_checkpoint: A checkpoint to resume training from.
This checkpoint can be accessed from within ``train_loop_per_worker``
by calling ``ray.train.get_checkpoint()``.
metadata: Dict that should be made available via
`ray.train.get_context().get_metadata()` and in `checkpoint.get_metadata()`
for checkpoints saved from this Trainer. Must be JSON-serializable.
label_column: [Deprecated] Name of the label column. A column with this name
must be present in the training dataset.
params: [Deprecated] LightGBM training parameters.
Refer to `LightGBM documentation <https://lightgbm.readthedocs.io/>`_
for a list of possible parameters.
num_boost_round: [Deprecated] Target number of boosting iterations (trees in the model).
Note that unlike in ``lightgbm.train``, this is the target number
of trees, meaning that if you set ``num_boost_round=10`` and pass a model
that has already been trained for 5 iterations, it will be trained for 5
iterations more, instead of 10 more.
**train_kwargs: [Deprecated] Additional kwargs passed to ``lightgbm.train()`` function.
"""
_handles_checkpoint_freq = True
_handles_checkpoint_at_end = True
def __init__(
self,
train_loop_per_worker: Optional[
Union[Callable[[], None], Callable[[Dict], None]]
] = None,
*,
train_loop_config: Optional[Dict] = None,
lightgbm_config: Optional[LightGBMConfig] = None,
scaling_config: Optional[ray.train.ScalingConfig] = None,
run_config: Optional[ray.train.RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
dataset_config: Optional[ray.train.DataConfig] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
metadata: Optional[Dict[str, Any]] = None,
# TODO: [Deprecated] Legacy LightGBMTrainer API
label_column: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
num_boost_round: Optional[int] = None,
**train_kwargs,
):
# TODO: [Deprecated] Legacy LightGBMTrainer API
legacy_api = train_loop_per_worker is None
if legacy_api:
train_loop_per_worker = self._get_legacy_train_fn_per_worker(
lightgbm_train_kwargs=train_kwargs,
run_config=run_config,
label_column=label_column,
num_boost_round=num_boost_round,
datasets=datasets,
)
train_loop_config = params or {}
elif train_kwargs:
_log_deprecation_warning(
"Passing `lightgbm.train` kwargs to `LightGBMTrainer` is deprecated. "
f"Got kwargs: {train_kwargs.keys()}\n"
"In your training function, you can call `lightgbm.train(**kwargs)` "
"with arbitrary arguments. "
f"{LEGACY_LIGHTGBM_TRAINER_DEPRECATION_MESSAGE}"
)
super(LightGBMTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
lightgbm_config=lightgbm_config,
scaling_config=scaling_config,
run_config=run_config,
datasets=datasets,
dataset_config=dataset_config,
resume_from_checkpoint=resume_from_checkpoint,
metadata=metadata,
)
def _get_legacy_train_fn_per_worker(
self,
lightgbm_train_kwargs: Dict,
run_config: Optional[ray.train.RunConfig],
datasets: Optional[Dict[str, GenDataset]],
label_column: Optional[str],
num_boost_round: Optional[int],
) -> Callable[[Dict], None]:
"""Get the training function for the legacy LightGBMTrainer API."""
datasets = datasets or {}
if not datasets.get(TRAIN_DATASET_KEY):
raise ValueError(
"`datasets` must be provided for the LightGBMTrainer API "
"if `train_loop_per_worker` is not provided. "
"This dict must contain the training dataset under the "
f"key: '{TRAIN_DATASET_KEY}'. "
f"Got keys: {list(datasets.keys())}"
)
if not label_column:
raise ValueError(
"`label_column` must be provided for the LightGBMTrainer API "
"if `train_loop_per_worker` is not provided. "
"This is the column name of the label in the dataset."
)
num_boost_round = num_boost_round or 10
_log_deprecation_warning(LEGACY_LIGHTGBM_TRAINER_DEPRECATION_MESSAGE)
# Initialize a default Ray Train metrics/checkpoint reporting callback if needed
callbacks = lightgbm_train_kwargs.get("callbacks", [])
user_supplied_callback = any(
isinstance(callback, RayTrainReportCallback) for callback in callbacks
)
callback_kwargs = {}
if run_config:
checkpoint_frequency = run_config.checkpoint_config.checkpoint_frequency
checkpoint_at_end = run_config.checkpoint_config.checkpoint_at_end
callback_kwargs["frequency"] = checkpoint_frequency
# Default `checkpoint_at_end=True` unless the user explicitly sets it.
callback_kwargs["checkpoint_at_end"] = (
checkpoint_at_end if checkpoint_at_end is not None else True
)
if not user_supplied_callback:
callbacks.append(RayTrainReportCallback(**callback_kwargs))
lightgbm_train_kwargs["callbacks"] = callbacks
train_fn_per_worker = partial(
_lightgbm_train_fn_per_worker,
label_column=label_column,
num_boost_round=num_boost_round,
dataset_keys=set(datasets),
lightgbm_train_kwargs=lightgbm_train_kwargs,
)
return train_fn_per_worker
@classmethod
def get_model(
cls,
checkpoint: Checkpoint,
) -> lightgbm.Booster:
"""Retrieve the LightGBM model stored in this checkpoint."""
return RayTrainReportCallback.get_model(checkpoint)
| LightGBMTrainer |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 8902,
"end": 8981
} | class ____(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
| Uint32ValueChecker |
python | crytic__slither | slither/core/expressions/identifier.py | {
"start": 516,
"end": 2164
} | class ____(Expression):
def __init__(
self,
value: Union[
Variable,
"TopLevel",
"ContractLevel",
"Contract",
"SolidityVariable",
"SolidityFunction",
"YulBuiltin",
],
) -> None:
super().__init__()
# pylint: disable=import-outside-toplevel
from slither.core.declarations import Contract, SolidityVariable, SolidityFunction
from slither.solc_parsing.yul.evm_functions import YulBuiltin
assert isinstance(
value,
(
Variable,
TopLevel,
ContractLevel,
Contract,
SolidityVariable,
SolidityFunction,
YulBuiltin,
),
)
self._value: Union[
Variable,
"TopLevel",
"ContractLevel",
"Contract",
"SolidityVariable",
"SolidityFunction",
"YulBuiltin",
] = value
self._type: Optional["Type"] = None
@property
def type(self) -> Optional["Type"]:
return self._type
@type.setter
def type(self, new_type: "Type") -> None:
self._type = new_type
@property
def value(
self,
) -> Union[
Variable,
"TopLevel",
"ContractLevel",
"Contract",
"SolidityVariable",
"SolidityFunction",
"YulBuiltin",
]:
return self._value
def __str__(self) -> str:
return str(self._value)
def expression(self):
return self
| Identifier |
python | coleifer__peewee | peewee.py | {
"start": 42371,
"end": 43866
} | class ____(WrappedNode):
def __init__(self, node, direction, collation=None, nulls=None):
super(Ordering, self).__init__(node)
self.direction = direction
self.collation = collation
self.nulls = nulls
if nulls and nulls.lower() not in ('first', 'last'):
raise ValueError('Ordering nulls= parameter must be "first" or '
'"last", got: %s' % nulls)
def collate(self, collation=None):
return Ordering(self.node, self.direction, collation)
def _null_ordering_case(self, nulls):
if nulls.lower() == 'last':
ifnull, notnull = 1, 0
elif nulls.lower() == 'first':
ifnull, notnull = 0, 1
else:
raise ValueError('unsupported value for nulls= ordering.')
return Case(None, ((self.node.is_null(), ifnull),), notnull)
def __sql__(self, ctx):
if self.nulls and not ctx.state.nulls_ordering:
ctx.sql(self._null_ordering_case(self.nulls)).literal(', ')
ctx.sql(self.node).literal(' %s' % self.direction)
if self.collation:
ctx.literal(' COLLATE %s' % self.collation)
if self.nulls and ctx.state.nulls_ordering:
ctx.literal(' NULLS %s' % self.nulls)
return ctx
def Asc(node, collation=None, nulls=None):
return Ordering(node, 'ASC', collation, nulls)
def Desc(node, collation=None, nulls=None):
return Ordering(node, 'DESC', collation, nulls)
| Ordering |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/object_identity.py | {
"start": 4626,
"end": 6398
} | class ____(collections.abc.MutableSet):
"""Like the built-in set, but compares objects with "is"."""
__slots__ = ["_storage", "__weakref__"]
def __init__(self, *args):
self._storage = set(self._wrap_key(obj) for obj in list(*args))
def __le__(self, other: Set[Any]) -> bool:
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for item in self._storage:
if item not in other:
return False
return True
def __ge__(self, other: Set[Any]) -> bool:
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for item in other:
if item not in self:
return False
return True
@staticmethod
def _from_storage(storage):
result = ObjectIdentitySet()
result._storage = storage # pylint: disable=protected-access
return result
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __contains__(self, key):
return self._wrap_key(key) in self._storage
def discard(self, key):
self._storage.discard(self._wrap_key(key))
def add(self, key):
self._storage.add(self._wrap_key(key))
def update(self, items):
self._storage.update([self._wrap_key(item) for item in items])
def clear(self):
self._storage.clear()
def intersection(self, items):
return self._storage.intersection([self._wrap_key(item) for item in items])
def difference(self, items):
return ObjectIdentitySet._from_storage(
self._storage.difference([self._wrap_key(item) for item in items]))
def __len__(self):
return len(self._storage)
def __iter__(self):
keys = list(self._storage)
for key in keys:
yield key.unwrapped
| ObjectIdentitySet |
python | getsentry__sentry | src/sentry/migrations/0953_make_releasefiles_tti.py | {
"start": 227,
"end": 1656
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0952_fix_span_item_event_type_alerts"),
]
operations = [
migrations.AddField(
model_name="releasefile",
name="date_accessed",
field=models.DateTimeField(
db_default=django.db.models.functions.datetime.Now(),
default=django.utils.timezone.now,
),
),
]
| Migration |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_parameters.py | {
"start": 7043,
"end": 9018
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls: type[Cosmology], cosmo: Cosmology):
"""Test Parameter ``Tcmb0``."""
# on the class
Tcmb0 = cosmo_cls.parameters["Tcmb0"]
assert isinstance(Tcmb0, Parameter)
assert "Temperature of the CMB" in Tcmb0.__doc__
assert Tcmb0.unit == u.K
assert Tcmb0.default == 0.0 * u.K
# validation
assert Tcmb0.validate(cosmo, 1) == 1 * u.K
assert Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo.__dict__["Tcmb0"]
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls: type[Cosmology], ba: BoundArguments):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
# =============================================================================
| ParameterTcmb0TestMixin |
python | getsentry__sentry-python | tests/conftest.py | {
"start": 5950,
"end": 8634
} | class ____(Transport):
def __init__(self):
Transport.__init__(self)
def capture_envelope(self, _: Envelope) -> None:
"""No-op capture_envelope for tests"""
pass
@pytest.fixture
def capture_events(monkeypatch):
def inner():
events = []
test_client = sentry_sdk.get_client()
old_capture_envelope = test_client.transport.capture_envelope
def append_event(envelope):
for item in envelope:
if item.headers.get("type") in ("event", "transaction"):
events.append(item.payload.json)
return old_capture_envelope(envelope)
monkeypatch.setattr(test_client.transport, "capture_envelope", append_event)
return events
return inner
@pytest.fixture
def capture_envelopes(monkeypatch):
def inner():
envelopes = []
test_client = sentry_sdk.get_client()
old_capture_envelope = test_client.transport.capture_envelope
def append_envelope(envelope):
envelopes.append(envelope)
return old_capture_envelope(envelope)
monkeypatch.setattr(test_client.transport, "capture_envelope", append_envelope)
return envelopes
return inner
@pytest.fixture
def capture_record_lost_event_calls(monkeypatch):
def inner():
calls = []
test_client = sentry_sdk.get_client()
def record_lost_event(reason, data_category=None, item=None, *, quantity=1):
calls.append((reason, data_category, item, quantity))
monkeypatch.setattr(
test_client.transport, "record_lost_event", record_lost_event
)
return calls
return inner
@pytest.fixture
def capture_events_forksafe(monkeypatch, capture_events, request):
def inner():
capture_events()
events_r, events_w = os.pipe()
events_r = os.fdopen(events_r, "rb", 0)
events_w = os.fdopen(events_w, "wb", 0)
test_client = sentry_sdk.get_client()
old_capture_envelope = test_client.transport.capture_envelope
def append(envelope):
event = envelope.get_event() or envelope.get_transaction_event()
if event is not None:
events_w.write(json.dumps(event).encode("utf-8"))
events_w.write(b"\n")
return old_capture_envelope(envelope)
def flush(timeout=None, callback=None):
events_w.write(b"flush\n")
monkeypatch.setattr(test_client.transport, "capture_envelope", append)
monkeypatch.setattr(test_client, "flush", flush)
return EventStreamReader(events_r, events_w)
return inner
| TestTransport |
python | openai__openai-python | tests/api_resources/chat/test_completions.py | {
"start": 15982,
"end": 32322
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
"content": "string",
"role": "developer",
}
],
model="gpt-4o",
)
assert_matches_type(ChatCompletion, completion, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
"content": "string",
"role": "developer",
"name": "name",
}
],
model="gpt-4o",
audio={
"format": "wav",
"voice": "ash",
},
frequency_penalty=-2,
function_call="none",
functions=[
{
"name": "name",
"description": "description",
"parameters": {"foo": "bar"},
}
],
logit_bias={"foo": 0},
logprobs=True,
max_completion_tokens=0,
max_tokens=0,
metadata={"foo": "string"},
modalities=["text"],
n=1,
parallel_tool_calls=True,
prediction={
"content": "string",
"type": "content",
},
presence_penalty=-2,
prompt_cache_key="prompt-cache-key-1234",
prompt_cache_retention="in-memory",
reasoning_effort="none",
response_format={"type": "text"},
safety_identifier="safety-identifier-1234",
seed=-9007199254740991,
service_tier="auto",
stop="\n",
store=True,
stream=False,
stream_options={
"include_obfuscation": True,
"include_usage": True,
},
temperature=1,
tool_choice="none",
tools=[
{
"function": {
"name": "name",
"description": "description",
"parameters": {"foo": "bar"},
"strict": True,
},
"type": "function",
}
],
top_logprobs=0,
top_p=1,
user="user-1234",
verbosity="low",
web_search_options={
"search_context_size": "low",
"user_location": {
"approximate": {
"city": "city",
"country": "country",
"region": "region",
"timezone": "timezone",
},
"type": "approximate",
},
},
)
assert_matches_type(ChatCompletion, completion, path=["response"])
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
"role": "developer",
}
],
model="gpt-4o",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
assert_matches_type(ChatCompletion, completion, path=["response"])
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
"role": "developer",
}
],
model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = await response.parse()
assert_matches_type(ChatCompletion, completion, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
    """Streaming create (stream=True) returns a stream whose transport can be closed cleanly."""
    stream = await async_client.chat.completions.create(
        messages=[{"content": "string", "role": "developer"}],
        model="gpt-4o",
        stream=True,
    )
    await stream.response.aclose()
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
    """Streaming create accepts every optional request parameter simultaneously."""
    stream = await async_client.chat.completions.create(
        messages=[{"content": "string", "role": "developer", "name": "name"}],
        model="gpt-4o",
        stream=True,
        audio={"format": "wav", "voice": "ash"},
        frequency_penalty=-2,
        function_call="none",
        functions=[{"name": "name", "description": "description", "parameters": {"foo": "bar"}}],
        logit_bias={"foo": 0},
        logprobs=True,
        max_completion_tokens=0,
        max_tokens=0,
        metadata={"foo": "string"},
        modalities=["text"],
        n=1,
        parallel_tool_calls=True,
        prediction={"content": "string", "type": "content"},
        presence_penalty=-2,
        prompt_cache_key="prompt-cache-key-1234",
        prompt_cache_retention="in-memory",
        reasoning_effort="none",
        response_format={"type": "text"},
        safety_identifier="safety-identifier-1234",
        seed=-9007199254740991,
        service_tier="auto",
        stop="\n",
        store=True,
        stream_options={"include_obfuscation": True, "include_usage": True},
        temperature=1,
        tool_choice="none",
        tools=[
            {
                "function": {
                    "name": "name",
                    "description": "description",
                    "parameters": {"foo": "bar"},
                    "strict": True,
                },
                "type": "function",
            }
        ],
        top_logprobs=0,
        top_p=1,
        user="user-1234",
        verbosity="low",
        web_search_options={
            "search_context_size": "low",
            "user_location": {
                "approximate": {
                    "city": "city",
                    "country": "country",
                    "region": "region",
                    "timezone": "timezone",
                },
                "type": "approximate",
            },
        },
    )
    await stream.response.aclose()
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
    """Streaming create via the raw-response wrapper parses into a closable stream."""
    http_response = await async_client.chat.completions.with_raw_response.create(
        messages=[{"content": "string", "role": "developer"}],
        model="gpt-4o",
        stream=True,
    )

    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    stream = http_response.parse()
    await stream.close()
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
    """Streaming create via the streaming-response wrapper closes both stream and response."""
    async with async_client.chat.completions.with_streaming_response.create(
        messages=[{"content": "string", "role": "developer"}],
        model="gpt-4o",
        stream=True,
    ) as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        stream = await streamed.parse()
        await stream.close()

    assert cast(Any, streamed.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
    """A stored chat completion can be fetched by its id."""
    fetched = await async_client.chat.completions.retrieve("completion_id")
    assert_matches_type(ChatCompletion, fetched, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
    """Retrieve via the raw-response wrapper exposes headers and parses to a ChatCompletion."""
    http_response = await async_client.chat.completions.with_raw_response.retrieve("completion_id")

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    parsed = http_response.parse()
    assert_matches_type(ChatCompletion, parsed, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
    """Retrieve via the streaming-response wrapper parses lazily and closes on exit."""
    async with async_client.chat.completions.with_streaming_response.retrieve("completion_id") as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        parsed = await streamed.parse()
        assert_matches_type(ChatCompletion, parsed, path=["response"])

    assert cast(Any, streamed.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
    """An empty completion id is rejected client-side before any request is sent."""
    with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
        await async_client.chat.completions.with_raw_response.retrieve("")
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
    """Metadata on a stored completion can be updated."""
    updated = await async_client.chat.completions.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    )
    assert_matches_type(ChatCompletion, updated, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
    """Update via the raw-response wrapper exposes headers and parses to a ChatCompletion."""
    http_response = await async_client.chat.completions.with_raw_response.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    )

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    parsed = http_response.parse()
    assert_matches_type(ChatCompletion, parsed, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
    """Update via the streaming-response wrapper parses lazily and closes on exit."""
    async with async_client.chat.completions.with_streaming_response.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    ) as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        parsed = await streamed.parse()
        assert_matches_type(ChatCompletion, parsed, path=["response"])

    assert cast(Any, streamed.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
    """An empty completion id is rejected client-side before any request is sent."""
    with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
        await async_client.chat.completions.with_raw_response.update(
            completion_id="",
            metadata={"foo": "string"},
        )
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
    """Listing with no filters returns a cursor page of completions."""
    page = await async_client.chat.completions.list()
    assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
    """List accepts the full set of filter and pagination parameters."""
    page = await async_client.chat.completions.list(
        after="after",
        limit=0,
        metadata={"foo": "string"},
        model="model",
        order="asc",
    )
    assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
    """List via the raw-response wrapper exposes headers and parses to a cursor page."""
    http_response = await async_client.chat.completions.with_raw_response.list()

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    parsed = http_response.parse()
    assert_matches_type(AsyncCursorPage[ChatCompletion], parsed, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
    """List via the streaming-response wrapper parses lazily and closes on exit."""
    async with async_client.chat.completions.with_streaming_response.list() as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        parsed = await streamed.parse()
        assert_matches_type(AsyncCursorPage[ChatCompletion], parsed, path=["response"])

    assert cast(Any, streamed.is_closed) is True
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
    """Deleting a stored completion returns a ChatCompletionDeleted confirmation."""
    deleted = await async_client.chat.completions.delete("completion_id")
    assert_matches_type(ChatCompletionDeleted, deleted, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
    """Delete via the raw-response wrapper exposes headers and parses the confirmation."""
    http_response = await async_client.chat.completions.with_raw_response.delete("completion_id")

    assert http_response.is_closed is True
    assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
    parsed = http_response.parse()
    assert_matches_type(ChatCompletionDeleted, parsed, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
    """Delete via the streaming-response wrapper parses lazily and closes on exit."""
    async with async_client.chat.completions.with_streaming_response.delete("completion_id") as streamed:
        assert not streamed.is_closed
        assert streamed.http_request.headers.get("X-Stainless-Lang") == "python"

        parsed = await streamed.parse()
        assert_matches_type(ChatCompletionDeleted, parsed, path=["response"])

    assert cast(Any, streamed.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
    """An empty completion id is rejected client-side before any request is sent."""
    with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
        await async_client.chat.completions.with_raw_response.delete("")
@parametrize
async def test_method_create_disallows_pydantic(self, async_client: AsyncOpenAI) -> None:
    """Passing a pydantic model class as response_format raises a helpful TypeError."""

    class ExampleModel(pydantic.BaseModel):
        a: str

    with pytest.raises(TypeError, match=r"You tried to pass a `BaseModel` class"):
        await async_client.chat.completions.create(
            messages=[{"content": "string", "role": "system"}],
            model="gpt-4o",
            response_format=cast(Any, ExampleModel),
        )
| TestAsyncCompletions |
python | huggingface__transformers | src/transformers/models/starcoder2/modeling_starcoder2.py | {
"start": 12397,
"end": 15425
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Starcoder2Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Starcoder2Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| Starcoder2RotaryEmbedding |
python | doocs__leetcode | solution/0200-0299/0220.Contains Duplicate III/Solution.py | {
"start": 0,
"end": 438
} | class ____:
def containsNearbyAlmostDuplicate(
self, nums: List[int], indexDiff: int, valueDiff: int
) -> bool:
s = SortedSet()
for i, v in enumerate(nums):
j = s.bisect_left(v - valueDiff)
if j < len(s) and s[j] <= v + valueDiff:
return True
s.add(v)
if i >= indexDiff:
s.remove(nums[i - indexDiff])
return False
| Solution |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 26810,
"end": 26943
} | class ____(Interface):
def __call__(request):
"""Return the *default* root object for an application"""
| IDefaultRootFactory |
python | streamlit__streamlit | lib/tests/streamlit/data_mocks/modin_mocks.py | {
"start": 731,
"end": 1519
} | class ____:
"""This is dummy DataFrame class, which imitates modin.pandas.dataframe.DataFrame class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a modin dataframe.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "modin.pandas.dataframe"
def __init__(self, data: pd.DataFrame):
self._data: pd.DataFrame = data
def _to_pandas(self) -> pd.DataFrame:
return self._data
def head(self, n: int) -> DataFrame:
"""Returns the top n element of a mock version of Modin DataFrame."""
return DataFrame(self._data.head(n))
| DataFrame |
python | doocs__leetcode | solution/0000-0099/0003.Longest Substring Without Repeating Characters/Solution.py | {
"start": 0,
"end": 314
} | class ____:
def lengthOfLongestSubstring(self, s: str) -> int:
cnt = Counter()
ans = l = 0
for r, c in enumerate(s):
cnt[c] += 1
while cnt[c] > 1:
cnt[s[l]] -= 1
l += 1
ans = max(ans, r - l + 1)
return ans
| Solution |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 15686,
"end": 15897
} | class ____(AOTOutput):
"""A subclass that classifies AOTOutput that can be wrapped by TangentAOTInput"""
# ------------
# AOTInput
# ------------
@dataclasses.dataclass(frozen=True)
| DifferentiableAOTOutput |
python | tensorflow__tensorflow | tensorflow/python/ops/parsing_config.py | {
"start": 15951,
"end": 17523
} | class ____(collections.namedtuple(
"FixedLenSequenceFeature",
["shape", "dtype", "allow_missing", "default_value"])):
"""Configuration for parsing a variable-length input feature into a `Tensor`.
The resulting `Tensor` of parsing a single `SequenceExample` or `Example` has
a static `shape` of `[None] + shape` and the specified `dtype`.
The resulting `Tensor` of parsing a `batch_size` many `Example`s has
a static `shape` of `[batch_size, None] + shape` and the specified `dtype`.
The entries in the `batch` from different `Examples` will be padded with
`default_value` to the maximum length present in the `batch`.
To treat a sparse input as dense, provide `allow_missing=True`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data for dimension 2 and higher. First dimension is
of variable length `None`.
dtype: Data type of input.
allow_missing: Whether to allow this feature to be missing from a feature
list item. Is available only for parsing `SequenceExample` not for
parsing `Examples`.
default_value: Scalar value to be used to pad multiple `Example`s to their
maximum length. Irrelevant for parsing a single `Example` or
`SequenceExample`. Defaults to "" for dtype string and 0 otherwise
(optional).
"""
def __new__(cls, shape, dtype, allow_missing=False, default_value=None):
return super(FixedLenSequenceFeature, cls).__new__(
cls, shape, dtype, allow_missing, default_value)
| FixedLenSequenceFeature |
python | scikit-learn__scikit-learn | sklearn/model_selection/tests/test_successive_halving.py | {
"start": 784,
"end": 1653
} | class ____(DummyClassifier):
"""Dummy classifier that accepts parameters a, b, ... z.
These parameter don't affect the predictions and are useful for fast
grid searching."""
# update the constraints such that we accept all parameters from a to z
_parameter_constraints: dict = {
**DummyClassifier._parameter_constraints,
**{chr(key): "no_validation" for key in range(ord("a"), ord("z") + 1)},
}
def __init__(
self, strategy="stratified", random_state=None, constant=None, **kwargs
):
super().__init__(
strategy=strategy, random_state=random_state, constant=constant
)
def get_params(self, deep=False):
params = super().get_params(deep=deep)
for char in range(ord("a"), ord("z") + 1):
params[chr(char)] = "whatever"
return params
| FastClassifier |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 51290,
"end": 73990
} | class ____:
def test_simple(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_left(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
c = [1, 2, 3]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
qc, r2 = qr_multiply(a, eye(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_right(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
c = [1, 2, 3]
qc, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, qc)
assert_array_almost_equal(r, r2)
qc, r = qr_multiply(a, eye(3))
assert_array_almost_equal(q, qc)
def test_simple_pivoting(self):
a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_left_pivoting(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
def test_simple_right_pivoting(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, qc)
def test_simple_trap(self):
a = [[8, 2, 3], [2, 9, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
def test_simple_trap_pivoting(self):
a = np.asarray([[8, 2, 3], [2, 9, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall(self):
# full version
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_tall_pivoting(self):
# full version pivoting
a = np.asarray([[8, 2], [2, 9], [5, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall_e(self):
# economy version
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (3, 2))
assert_equal(r.shape, (2, 2))
def test_simple_tall_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8, 2], [2, 9], [5, 3]])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall_left(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode="economic")
c = [1, 2]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
c = array([1, 2, 0])
qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(q @ c[:2], qc)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_tall_left_pivoting(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r, jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc, r, kpvt = qr_multiply(a, c, "left", True)
assert_array_equal(jpvt, kpvt)
assert_array_almost_equal(q @ c, qc)
qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_tall_right(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode="economic")
c = [1, 2, 3]
cq, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
assert_array_almost_equal(r, r2)
cq, r = qr_multiply(a, eye(3))
assert_array_almost_equal(cq, q)
def test_simple_tall_right_pivoting(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r, jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2, 3]
cq, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, cq)
cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_fat(self):
# full version
a = [[8, 2, 5], [2, 9, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
def test_simple_fat_pivoting(self):
# full version pivoting
a = np.asarray([[8, 2, 5], [2, 9, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_fat_e(self):
# economy version
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8, 2, 3], [2, 9, 5]])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_fat_left(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode="economic")
c = [1, 2]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r, jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode="economic")
c = [1, 2]
cq, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
assert_array_almost_equal(r, r2)
cq, r = qr_multiply(a, eye(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r, jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, cq)
cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_complex(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
assert_array_almost_equal(q.conj().T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_complex_left(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_complex_right(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, qc)
qc, r = qr_multiply(a, eye(3))
assert_array_almost_equal(q, qc)
def test_simple_tall_complex_left(self):
a = [[8, 2+3j], [2, 9], [5+7j, 3]]
q, r = qr(a, mode="economic")
c = [1, 2+2j]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
c = array([1, 2, 0])
qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(q @ c[:2], qc)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_complex_left_conjugate(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(q.conj() @ c, qc)
def test_simple_complex_tall_left_conjugate(self):
a = [[3, 3+4j], [5, 2+2j], [3, 2]]
q, r = qr(a, mode='economic')
c = [1, 3+4j]
qc, r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(q.conj() @ c, qc)
def test_simple_complex_right_conjugate(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = np.array([1, 2, 3+4j])
qc, r = qr_multiply(a, c, conjugate=True)
assert_array_almost_equal(c @ q.conj(), qc)
def test_simple_complex_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.conj().T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_complex_left_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
def test_simple_complex_right_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, qc)
def test_random(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
def test_random_left(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
c = rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(q, qc)
def test_random_right(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
c = rng.random([n])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(n))
assert_array_almost_equal(q, cq)
def test_random_pivoting(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_tall(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a)
def test_random_tall_left(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode="economic")
c = rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(qc, q)
def test_random_tall_right(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode="economic")
c = rng.random([m])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(m))
assert_array_almost_equal(cq, q)
def test_random_tall_pivoting(self):
rng = np.random.RandomState(1234)
# full version pivoting
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_tall_e(self):
rng = np.random.RandomState(1234)
# economy version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (n, n))
def test_random_tall_e_pivoting(self):
rng = np.random.RandomState(1234)
# economy version pivoting
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (n, n))
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_trap(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a)
def test_random_trap_pivoting(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_complex(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
assert_array_almost_equal(q.conj().T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
def test_random_complex_left(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
c = rng.random([n]) + 1j*rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(q, qc)
def test_random_complex_right(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
c = rng.random([n]) + 1j*rng.random([n])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(n))
assert_array_almost_equal(q, cq)
def test_random_complex_pivoting(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.conj().T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_check_finite(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a, check_finite=False)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_lwork(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
# Get comparison values
q, r = qr(a, lwork=None)
# Test against minimum valid lwork
q2, r2 = qr(a, lwork=3)
assert_array_almost_equal(q2, q)
assert_array_almost_equal(r2, r)
# Test against larger lwork
q3, r3 = qr(a, lwork=10)
assert_array_almost_equal(q3, q)
assert_array_almost_equal(r3, r)
# Test against explicit lwork=-1
q4, r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4, q)
assert_array_almost_equal(r4, r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork': 0})
assert_raises(Exception, qr, (a,), {'lwork': 2})
@pytest.mark.parametrize("m", [0, 1, 2])
@pytest.mark.parametrize("n", [0, 1, 2])
@pytest.mark.parametrize("pivoting", [False, True])
@pytest.mark.parametrize('dtype', DTYPES)
def test_shape_dtype(self, m, n, pivoting, dtype):
k = min(m, n)
a = np.zeros((m, n), dtype=dtype)
q, r, *other = qr(a, pivoting=pivoting)
assert_equal(q.shape, (m, m))
assert_equal(q.dtype, dtype)
assert_equal(r.shape, (m, n))
assert_equal(r.dtype, dtype)
assert len(other) == (1 if pivoting else 0)
if pivoting:
p, = other
assert_equal(p.shape, (n,))
assert_equal(p.dtype, np.int32)
r, *other = qr(a, mode='r', pivoting=pivoting)
assert_equal(r.shape, (m, n))
assert_equal(r.dtype, dtype)
assert len(other) == (1 if pivoting else 0)
if pivoting:
p, = other
assert_equal(p.shape, (n,))
assert_equal(p.dtype, np.int32)
q, r, *other = qr(a, mode='economic', pivoting=pivoting)
assert_equal(q.shape, (m, k))
assert_equal(q.dtype, dtype)
assert_equal(r.shape, (k, n))
assert_equal(r.dtype, dtype)
assert len(other) == (1 if pivoting else 0)
if pivoting:
p, = other
assert_equal(p.shape, (n,))
assert_equal(p.dtype, np.int32)
(raw, tau), r, *other = qr(a, mode='raw', pivoting=pivoting)
assert_equal(raw.shape, (m, n))
assert_equal(raw.dtype, dtype)
assert_equal(tau.shape, (k,))
assert_equal(tau.dtype, dtype)
assert_equal(r.shape, (k, n))
assert_equal(r.dtype, dtype)
assert len(other) == (1 if pivoting else 0)
if pivoting:
p, = other
assert_equal(p.shape, (n,))
assert_equal(p.dtype, np.int32)
@pytest.mark.parametrize(("m", "n"), [(0, 0), (0, 2), (2, 0)])
def test_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
q, r = qr(a)
assert_allclose(q, np.identity(m))
assert_allclose(r, np.empty((m, n)))
q, r, p = qr(a, pivoting=True)
assert_allclose(q, np.identity(m))
assert_allclose(r, np.empty((m, n)))
assert_allclose(p, np.arange(n))
r, = qr(a, mode='r')
assert_allclose(r, np.empty((m, n)))
q, r = qr(a, mode='economic')
assert_allclose(q, np.empty((m, k)))
assert_allclose(r, np.empty((k, n)))
(raw, tau), r = qr(a, mode='raw')
assert_allclose(raw, np.empty((m, n)))
assert_allclose(tau, np.empty((k,)))
assert_allclose(r, np.empty((k, n)))
def test_multiply_empty(self):
a = np.empty((0, 0))
c = np.empty((0, 0))
cq, r = qr_multiply(a, c)
assert_allclose(cq, np.empty((0, 0)))
a = np.empty((0, 2))
c = np.empty((2, 0))
cq, r = qr_multiply(a, c)
assert_allclose(cq, np.empty((2, 0)))
a = np.empty((2, 0))
c = np.empty((0, 2))
cq, r = qr_multiply(a, c)
assert_allclose(cq, np.empty((0, 2)))
| TestQR |
python | django__django | django/db/models/query.py | {
"start": 10095,
"end": 10514
} | class ____(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield row[0]
| FlatValuesListIterable |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataform.py | {
"start": 13761,
"end": 14534
} | class ____:
@mock.patch(HOOK_STR)
def test_execute(self, hook_mock):
op = DataformRemoveDirectoryOperator(
task_id="remove-directory",
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
workspace_id=WORKSPACE_ID,
directory_path=DIRECTORY_PATH,
)
op.execute(context=mock.MagicMock())
hook_mock.return_value.remove_directory.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
workspace_id=WORKSPACE_ID,
path=DIRECTORY_PATH,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataformRemoveDirectoryOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property7.py | {
"start": 102,
"end": 513
} | class ____:
def __init__(self):
return
@property
def value(self):
return 42
def __getattr__(self, name: str):
return 0
b1 = A.value
# This should generate an error because __getattr__
# is not applied to a class.
b2 = A.blah
b3 = A.value.fget
a = A()
c1 = a.value
c2 = a.blah
# This should generate an error because a.value is
# the property value.
c3 = a.value.fget
| A |
python | keras-team__keras | keras/src/backend/common/variables_test.py | {
"start": 32889,
"end": 33705
} | class ____(test_case.TestCase):
def test_invalid_bool(self):
"""Test converting a variable to boolean."""
v = backend.Variable(initializer=np.ones((2, 2)))
with self.assertRaisesRegex(
TypeError, "A Keras Variable cannot be used as a boolean."
):
bool(v)
def test_invalid_int(self):
v = backend.Variable(initializer=np.ones((2, 2)))
with self.assertRaisesRegex(
TypeError, "Only scalar arrays can be converted to Python scalars."
):
int(v)
def test_invalid_float(self):
v = backend.Variable(initializer=np.ones((2, 2)))
with self.assertRaisesRegex(
TypeError, "Only scalar arrays can be converted to Python scalars."
):
float(v)
| VariableOpsBehaviorTest |
python | conda__conda | conda/exceptions.py | {
"start": 5701,
"end": 6406
} | class ____(ClobberError):
def __init__(self, source_path: PathType, target_path: PathType, context: Context):
message = dals(
"""
Conda was asked to clobber an existing path.
source path: %(source_path)s
target path: %(target_path)s
"""
)
if context.path_conflict == PathConflict.prevent:
message += (
"Conda no longer clobbers existing paths without the use of the "
"--clobber option\n."
)
super().__init__(
message,
context.path_conflict,
target_path=target_path,
source_path=source_path,
)
| BasicClobberError |
python | scikit-learn__scikit-learn | sklearn/externals/_numpydoc/docscrape.py | {
"start": 2324,
"end": 2592
} | class ____(Exception):
def __str__(self):
message = self.args[0]
if hasattr(self, "docstring"):
message = f"{message} in {self.docstring!r}"
return message
Parameter = namedtuple("Parameter", ["name", "type", "desc"])
| ParseError |
python | chroma-core__chroma | chromadb/api/fastapi.py | {
"start": 1656,
"end": 28510
} | class ____(BaseHTTPClient, ServerAPI):
def __init__(self, system: System):
super().__init__(system)
system.settings.require("chroma_server_host")
system.settings.require("chroma_server_http_port")
self._opentelemetry_client = self.require(OpenTelemetryClient)
self._product_telemetry_client = self.require(ProductTelemetryClient)
self._settings = system.settings
self._api_url = FastAPI.resolve_url(
chroma_server_host=str(system.settings.chroma_server_host),
chroma_server_http_port=system.settings.chroma_server_http_port,
chroma_server_ssl_enabled=system.settings.chroma_server_ssl_enabled,
default_api_path=system.settings.chroma_server_api_default_path,
)
if self._settings.chroma_server_ssl_verify is not None:
self._session = httpx.Client(
timeout=None,
limits=self.http_limits,
verify=self._settings.chroma_server_ssl_verify,
)
else:
self._session = httpx.Client(timeout=None, limits=self.http_limits)
self._header = system.settings.chroma_server_headers or {}
self._header["Content-Type"] = "application/json"
self._header["User-Agent"] = (
"Chroma Python Client v"
+ __version__
+ " (https://github.com/chroma-core/chroma)"
)
if self._header is not None:
self._session.headers.update(self._header)
if system.settings.chroma_client_auth_provider:
self._auth_provider = self.require(ClientAuthProvider)
_headers = self._auth_provider.authenticate()
for header, value in _headers.items():
self._session.headers[header] = value.get_secret_value()
def _make_request(self, method: str, path: str, **kwargs: Dict[str, Any]) -> Any:
# If the request has json in kwargs, use orjson to serialize it,
# remove it from kwargs, and add it to the content parameter
# This is because httpx uses a slower json serializer
if "json" in kwargs:
data = orjson.dumps(kwargs.pop("json"), option=orjson.OPT_SERIALIZE_NUMPY)
kwargs["content"] = data
# Unlike requests, httpx does not automatically escape the path
escaped_path = urllib.parse.quote(path, safe="/", encoding=None, errors=None)
url = self._api_url + escaped_path
response = self._session.request(method, url, **cast(Any, kwargs))
BaseHTTPClient._raise_chroma_error(response)
return orjson.loads(response.text)
@trace_method("FastAPI.heartbeat", OpenTelemetryGranularity.OPERATION)
@override
def heartbeat(self) -> int:
"""Returns the current server time in nanoseconds to check if the server is alive"""
resp_json = self._make_request("get", "/heartbeat")
return int(resp_json["nanosecond heartbeat"])
# Migrated to rust in distributed.
@trace_method("FastAPI.create_database", OpenTelemetryGranularity.OPERATION)
@override
def create_database(
self,
name: str,
tenant: str = DEFAULT_TENANT,
) -> None:
"""Creates a database"""
self._make_request(
"post",
f"/tenants/{tenant}/databases",
json={"name": name},
)
# Migrated to rust in distributed.
@trace_method("FastAPI.get_database", OpenTelemetryGranularity.OPERATION)
@override
def get_database(
self,
name: str,
tenant: str = DEFAULT_TENANT,
) -> Database:
"""Returns a database"""
resp_json = self._make_request(
"get",
f"/tenants/{tenant}/databases/{name}",
)
return Database(
id=resp_json["id"], name=resp_json["name"], tenant=resp_json["tenant"]
)
@trace_method("FastAPI.delete_database", OpenTelemetryGranularity.OPERATION)
@override
def delete_database(
self,
name: str,
tenant: str = DEFAULT_TENANT,
) -> None:
"""Deletes a database"""
self._make_request(
"delete",
f"/tenants/{tenant}/databases/{name}",
)
@trace_method("FastAPI.list_databases", OpenTelemetryGranularity.OPERATION)
@override
def list_databases(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
) -> Sequence[Database]:
"""Returns a list of all databases"""
json_databases = self._make_request(
"get",
f"/tenants/{tenant}/databases",
params=BaseHTTPClient._clean_params(
{
"limit": limit,
"offset": offset,
}
),
)
databases = [
Database(id=db["id"], name=db["name"], tenant=db["tenant"])
for db in json_databases
]
return databases
@trace_method("FastAPI.create_tenant", OpenTelemetryGranularity.OPERATION)
@override
def create_tenant(self, name: str) -> None:
self._make_request("post", "/tenants", json={"name": name})
@trace_method("FastAPI.get_tenant", OpenTelemetryGranularity.OPERATION)
@override
def get_tenant(self, name: str) -> Tenant:
resp_json = self._make_request("get", "/tenants/" + name)
return Tenant(name=resp_json["name"])
@trace_method("FastAPI.get_user_identity", OpenTelemetryGranularity.OPERATION)
@override
def get_user_identity(self) -> UserIdentity:
return UserIdentity(**self._make_request("get", "/auth/identity"))
@trace_method("FastAPI.list_collections", OpenTelemetryGranularity.OPERATION)
@override
def list_collections(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> Sequence[CollectionModel]:
"""Returns a list of all collections"""
json_collections = self._make_request(
"get",
f"/tenants/{tenant}/databases/{database}/collections",
params=BaseHTTPClient._clean_params(
{
"limit": limit,
"offset": offset,
}
),
)
collection_models = [
CollectionModel.from_json(json_collection)
for json_collection in json_collections
]
return collection_models
@trace_method("FastAPI.count_collections", OpenTelemetryGranularity.OPERATION)
@override
def count_collections(
self, tenant: str = DEFAULT_TENANT, database: str = DEFAULT_DATABASE
) -> int:
"""Returns a count of collections"""
resp_json = self._make_request(
"get",
f"/tenants/{tenant}/databases/{database}/collections_count",
)
return cast(int, resp_json)
@trace_method("FastAPI.create_collection", OpenTelemetryGranularity.OPERATION)
@override
def create_collection(
self,
name: str,
schema: Optional[Schema] = None,
configuration: Optional[CreateCollectionConfiguration] = None,
metadata: Optional[CollectionMetadata] = None,
get_or_create: bool = False,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
"""Creates a collection"""
config_json = (
create_collection_configuration_to_json(configuration, metadata)
if configuration
else None
)
serialized_schema = schema.serialize_to_json() if schema else None
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections",
json={
"name": name,
"metadata": metadata,
"configuration": config_json,
"schema": serialized_schema,
"get_or_create": get_or_create,
},
)
model = CollectionModel.from_json(resp_json)
return model
@trace_method("FastAPI.get_collection", OpenTelemetryGranularity.OPERATION)
@override
def get_collection(
self,
name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
"""Returns a collection"""
resp_json = self._make_request(
"get",
f"/tenants/{tenant}/databases/{database}/collections/{name}",
)
model = CollectionModel.from_json(resp_json)
return model
@trace_method(
"FastAPI.get_or_create_collection", OpenTelemetryGranularity.OPERATION
)
@override
def get_or_create_collection(
self,
name: str,
schema: Optional[Schema] = None,
configuration: Optional[CreateCollectionConfiguration] = None,
metadata: Optional[CollectionMetadata] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
return self.create_collection(
name=name,
metadata=metadata,
configuration=configuration,
schema=schema,
get_or_create=True,
tenant=tenant,
database=database,
)
@trace_method("FastAPI._modify", OpenTelemetryGranularity.OPERATION)
@override
def _modify(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[CollectionMetadata] = None,
new_configuration: Optional[UpdateCollectionConfiguration] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
"""Updates a collection"""
self._make_request(
"put",
f"/tenants/{tenant}/databases/{database}/collections/{id}",
json={
"new_metadata": new_metadata,
"new_name": new_name,
"new_configuration": update_collection_configuration_to_json(
new_configuration
)
if new_configuration
else None,
},
)
@trace_method("FastAPI._fork", OpenTelemetryGranularity.OPERATION)
@override
def _fork(
self,
collection_id: UUID,
new_name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
"""Forks a collection"""
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/fork",
json={"new_name": new_name},
)
model = CollectionModel.from_json(resp_json)
return model
@trace_method("FastAPI._search", OpenTelemetryGranularity.OPERATION)
@override
def _search(
self,
collection_id: UUID,
searches: List[Search],
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> SearchResult:
"""Performs hybrid search on a collection"""
# Convert Search objects to dictionaries
payload = {"searches": [s.to_dict() for s in searches]}
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/search",
json=payload,
)
# Deserialize metadatas: convert transport format to SparseVector instances
metadata_batches = resp_json.get("metadatas", None)
if metadata_batches is not None:
# SearchResult has nested structure: List[Optional[List[Optional[Metadata]]]]
resp_json["metadatas"] = [
[
deserialize_metadata(metadata) if metadata is not None else None
for metadata in metadatas
]
if metadatas is not None
else None
for metadatas in metadata_batches
]
return SearchResult(resp_json)
@trace_method("FastAPI.delete_collection", OpenTelemetryGranularity.OPERATION)
@override
def delete_collection(
self,
name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
"""Deletes a collection"""
self._make_request(
"delete",
f"/tenants/{tenant}/databases/{database}/collections/{name}",
)
@trace_method("FastAPI._count", OpenTelemetryGranularity.OPERATION)
@override
def _count(
self,
collection_id: UUID,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> int:
"""Returns the number of embeddings in the database"""
resp_json = self._make_request(
"get",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/count",
)
return cast(int, resp_json)
@trace_method("FastAPI._peek", OpenTelemetryGranularity.OPERATION)
@override
def _peek(
self,
collection_id: UUID,
n: int = 10,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> GetResult:
return cast(
GetResult,
self._get(
collection_id,
tenant=tenant,
database=database,
limit=n,
include=IncludeMetadataDocumentsEmbeddings,
),
)
@trace_method("FastAPI._get", OpenTelemetryGranularity.OPERATION)
@override
def _get(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = IncludeMetadataDocuments,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> GetResult:
# Servers do not support receiving "data", as that is hydrated by the client as a loadable
filtered_include = [i for i in include if i != "data"]
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/get",
json={
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
"include": filtered_include,
},
)
# Deserialize metadatas: convert transport format to SparseVector instances
metadatas = resp_json.get("metadatas", None)
if metadatas is not None:
metadatas = [
deserialize_metadata(metadata) if metadata is not None else None
for metadata in metadatas
]
return GetResult(
ids=resp_json["ids"],
embeddings=resp_json.get("embeddings", None),
metadatas=metadatas,
documents=resp_json.get("documents", None),
data=None,
uris=resp_json.get("uris", None),
included=include,
)
@trace_method("FastAPI._delete", OpenTelemetryGranularity.OPERATION)
@override
def _delete(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
"""Deletes embeddings from the database"""
self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/delete",
json={
"ids": ids,
"where": where,
"where_document": where_document,
},
)
return None
@trace_method("FastAPI._submit_batch", OpenTelemetryGranularity.ALL)
def _submit_batch(
self,
batch: Tuple[
IDs,
Optional[Embeddings],
Optional[Metadatas],
Optional[Documents],
Optional[URIs],
],
url: str,
) -> None:
"""
Submits a batch of embeddings to the database
"""
# Serialize metadatas: convert SparseVector instances to transport format
serialized_metadatas = None
if batch[2] is not None:
serialized_metadatas = [
serialize_metadata(metadata) if metadata is not None else None
for metadata in batch[2]
]
data = {
"ids": batch[0],
"embeddings": optional_embeddings_to_base64_strings(batch[1])
if self.supports_base64_encoding()
else batch[1],
"metadatas": serialized_metadatas,
"documents": batch[3],
"uris": batch[4],
}
self._make_request("post", url, json=data)
@trace_method("FastAPI._add", OpenTelemetryGranularity.ALL)
@override
def _add(
self,
ids: IDs,
collection_id: UUID,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
"""
Adds a batch of embeddings to the database
- pass in column oriented data lists
"""
batch = (
ids,
embeddings,
metadatas,
documents,
uris,
)
validate_batch(batch, {"max_batch_size": self.get_max_batch_size()})
self._submit_batch(
batch,
f"/tenants/{tenant}/databases/{database}/collections/{str(collection_id)}/add",
)
return True
@trace_method("FastAPI._update", OpenTelemetryGranularity.ALL)
@override
def _update(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
"""
Updates a batch of embeddings in the database
- pass in column oriented data lists
"""
batch = (
ids,
embeddings if embeddings is not None else None,
metadatas,
documents,
uris,
)
validate_batch(batch, {"max_batch_size": self.get_max_batch_size()})
self._submit_batch(
batch,
f"/tenants/{tenant}/databases/{database}/collections/{str(collection_id)}/update",
)
return True
@trace_method("FastAPI._upsert", OpenTelemetryGranularity.ALL)
@override
def _upsert(
self,
collection_id: UUID,
ids: IDs,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
"""
Upserts a batch of embeddings in the database
- pass in column oriented data lists
"""
batch = (
ids,
embeddings,
metadatas,
documents,
uris,
)
validate_batch(batch, {"max_batch_size": self.get_max_batch_size()})
self._submit_batch(
batch,
f"/tenants/{tenant}/databases/{database}/collections/{str(collection_id)}/upsert",
)
return True
@trace_method("FastAPI._query", OpenTelemetryGranularity.ALL)
@override
def _query(
self,
collection_id: UUID,
query_embeddings: Embeddings,
ids: Optional[IDs] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = IncludeMetadataDocumentsDistances,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> QueryResult:
# Clients do not support receiving "data", as that is hydrated by the client as a loadable
filtered_include = [i for i in include if i != "data"]
"""Gets the nearest neighbors of a single embedding"""
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{collection_id}/query",
json={
"ids": ids,
"query_embeddings": convert_np_embeddings_to_list(query_embeddings)
if query_embeddings is not None
else None,
"n_results": n_results,
"where": where,
"where_document": where_document,
"include": filtered_include,
},
)
# Deserialize metadatas: convert transport format to SparseVector instances
metadata_batches = resp_json.get("metadatas", None)
if metadata_batches is not None:
metadata_batches = [
[
deserialize_metadata(metadata) if metadata is not None else None
for metadata in metadatas
]
if metadatas is not None
else None
for metadatas in metadata_batches
]
return QueryResult(
ids=resp_json["ids"],
distances=resp_json.get("distances", None),
embeddings=resp_json.get("embeddings", None),
metadatas=metadata_batches,
documents=resp_json.get("documents", None),
uris=resp_json.get("uris", None),
data=None,
included=include,
)
@trace_method("FastAPI.reset", OpenTelemetryGranularity.ALL)
@override
def reset(self) -> bool:
"""Resets the database"""
resp_json = self._make_request("post", "/reset")
return cast(bool, resp_json)
@trace_method("FastAPI.get_version", OpenTelemetryGranularity.OPERATION)
@override
def get_version(self) -> str:
"""Returns the version of the server"""
resp_json = self._make_request("get", "/version")
return cast(str, resp_json)
@override
def get_settings(self) -> Settings:
"""Returns the settings of the client"""
return self._settings
@trace_method("FastAPI.get_pre_flight_checks", OpenTelemetryGranularity.OPERATION)
def get_pre_flight_checks(self) -> Any:
if self.pre_flight_checks is None:
resp_json = self._make_request("get", "/pre-flight-checks")
self.pre_flight_checks = resp_json
return self.pre_flight_checks
@trace_method(
"FastAPI.supports_base64_encoding", OpenTelemetryGranularity.OPERATION
)
def supports_base64_encoding(self) -> bool:
pre_flight_checks = self.get_pre_flight_checks()
b64_encoding_enabled = cast(
bool, pre_flight_checks.get("supports_base64_encoding", False)
)
return b64_encoding_enabled
@trace_method("FastAPI.get_max_batch_size", OpenTelemetryGranularity.OPERATION)
@override
def get_max_batch_size(self) -> int:
pre_flight_checks = self.get_pre_flight_checks()
max_batch_size = cast(int, pre_flight_checks.get("max_batch_size", -1))
return max_batch_size
@trace_method("FastAPI.attach_function", OpenTelemetryGranularity.ALL)
@override
def attach_function(
self,
function_id: str,
name: str,
input_collection_id: UUID,
output_collection: str,
params: Optional[Dict[str, Any]] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> "AttachedFunction":
"""Attach a function to a collection."""
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/collections/{input_collection_id}/functions/attach",
json={
"name": name,
"function_id": function_id,
"output_collection": output_collection,
"params": params,
},
)
return AttachedFunction(
client=self,
id=UUID(resp_json["attached_function"]["id"]),
name=resp_json["attached_function"]["name"],
function_name=resp_json["attached_function"]["function_name"],
input_collection_id=input_collection_id,
output_collection=output_collection,
params=params,
tenant=tenant,
database=database,
)
@trace_method("FastAPI.get_attached_function", OpenTelemetryGranularity.ALL)
@override
def get_attached_function(
self,
name: str,
input_collection_id: UUID,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> "AttachedFunction":
"""Get an attached function by name for a specific collection."""
resp_json = self._make_request(
"get",
f"/tenants/{tenant}/databases/{database}/collections/{input_collection_id}/functions/{name}",
)
af = resp_json["attached_function"]
return AttachedFunction(
client=self,
id=UUID(af["id"]),
name=af["name"],
function_name=af["function_name"],
input_collection_id=input_collection_id,
output_collection=af["output_collection"],
params=af.get("params"),
tenant=tenant,
database=database,
)
@trace_method("FastAPI.detach_function", OpenTelemetryGranularity.ALL)
@override
def detach_function(
self,
attached_function_id: UUID,
input_collection_id: UUID,
delete_output: bool = False,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
"""Detach a function and prevent any further runs."""
resp_json = self._make_request(
"post",
f"/tenants/{tenant}/databases/{database}/attached_functions/{attached_function_id}/detach",
json={
"delete_output": delete_output,
"input_collection_id": str(input_collection_id),
},
)
return cast(bool, resp_json["success"])
| FastAPI |
python | catalyst-team__catalyst | catalyst/contrib/layers/se.py | {
"start": 2350,
"end": 3483
} | class ____(nn.Module): # noqa: N801
"""
The scSE (Concurrent Spatial and Channel Squeeze and Channel Excitation)
block from the `Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks`__ paper.
Adapted from
https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178
Shape:
- Input: (batch, channels, height, width)
- Output: (batch, channels, height, width) (same shape as input)
__ https://arxiv.org/abs/1803.02579
"""
def __init__(self, in_channels: int, r: int = 16):
"""
Args:
in_channels: The number of channels
in the feature map of the input.
r: The reduction ratio of the intermediate channels.
Default: 16.
"""
super().__init__()
self.cse_block = cSE(in_channels, r)
self.sse_block = sSE(in_channels)
def forward(self, x: torch.Tensor):
"""Forward call."""
cse = self.cse_block(x)
sse = self.sse_block(x)
x = torch.add(cse, sse)
return x
__all__ = ["sSE", "scSE", "cSE"]
| scSE |
python | nedbat__coveragepy | coverage/sqlitedb.py | {
"start": 502,
"end": 10024
} | class ____:
"""A simple abstraction over a SQLite database.
Use as a context manager, then you can use it like a
:class:`python:sqlite3.Connection` object::
with SqliteDb(filename, debug_control) as db:
with db.execute("select a, b from some_table") as cur:
for a, b in cur:
etc(a, b)
"""
def __init__(self, filename: str, debug: TDebugCtl, no_disk: bool = False) -> None:
self.debug = debug
self.filename = filename
self.no_disk = no_disk
self.nest = 0
self.con: sqlite3.Connection | None = None
__repr__ = auto_repr
def _connect(self) -> None:
"""Connect to the db and do universal initialization."""
if self.con is not None:
return
# It can happen that Python switches threads while the tracer writes
# data. The second thread will also try to write to the data,
# effectively causing a nested context. However, given the idempotent
# nature of the tracer operations, sharing a connection among threads
# is not a problem.
if self.debug.should("sql"):
self.debug.write(f"Connecting to {self.filename!r}")
try:
# Use uri=True when connecting to memory URIs
if self.filename.startswith("file:"):
self.con = sqlite3.connect(self.filename, check_same_thread=False, uri=True)
else:
self.con = sqlite3.connect(self.filename, check_same_thread=False)
except sqlite3.Error as exc:
raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc
if self.debug.should("sql"):
self.debug.write(f"Connected to {self.filename!r} as {self.con!r}")
self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None)
# Turning off journal_mode can speed up writing. It can't always be
# disabled, so we have to be prepared for *-journal files elsewhere.
# In Python 3.12+, we can change the config to allow journal_mode=off.
if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"):
# Turn off defensive mode, so that journal_mode=off can succeed.
self.con.setconfig( # type: ignore[attr-defined, unused-ignore]
sqlite3.SQLITE_DBCONFIG_DEFENSIVE,
False,
)
# This pragma makes writing faster. It disables rollbacks, but we never need them.
self.execute_void("pragma journal_mode=off")
# This pragma makes writing faster. It can fail in unusual situations
# (https://github.com/coveragepy/coveragepy/issues/1646), so use fail_ok=True
# to keep things going.
self.execute_void("pragma synchronous=off", fail_ok=True)
def close(self, force: bool = False) -> None:
"""If needed, close the connection."""
if self.con is not None:
if force or not self.no_disk:
if self.debug.should("sql"):
self.debug.write(f"Closing {self.con!r} on {self.filename!r}")
self.con.close()
self.con = None
def __enter__(self) -> SqliteDb:
if self.nest == 0:
self._connect()
assert self.con is not None
self.con.__enter__()
self.nest += 1
return self
def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def]
self.nest -= 1
if self.nest == 0:
try:
assert self.con is not None
self.con.__exit__(exc_type, exc_value, traceback)
self.close()
except Exception as exc:
if self.debug.should("sql"):
self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}")
raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc
def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug.should("sql"):
tail = f" with {parameters!r}" if parameters else ""
self.debug.write(f"Executing {sql!r}{tail}")
try:
assert self.con is not None
try:
return self.con.execute(sql, parameters) # type: ignore[arg-type]
except Exception:
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/coveragepy/coveragepy/issues/1010
return self.con.execute(sql, parameters) # type: ignore[arg-type]
except sqlite3.Error as exc:
msg = str(exc)
if not self.no_disk:
try:
# `execute` is the first thing we do with the database, so try
# hard to provide useful hints if something goes wrong now.
with open(self.filename, "rb") as bad_file:
cov4_sig = b"!coverage.py: This is a private format"
if bad_file.read(len(cov4_sig)) == cov4_sig:
msg = (
"Looks like a coverage 4.x data file. "
+ "Are you mixing versions of coverage?"
)
except Exception:
pass
if self.debug.should("sql"):
self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}")
raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc
@contextlib.contextmanager
def execute(
self,
sql: str,
parameters: Iterable[Any] = (),
) -> Iterator[sqlite3.Cursor]:
"""Context managed :meth:`python:sqlite3.Connection.execute`.
Use with a ``with`` statement to auto-close the returned cursor.
"""
cur = self._execute(sql, parameters)
try:
yield cur
finally:
cur.close()
def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None:
"""Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor.
If `fail_ok` is True, then SQLite errors are ignored.
"""
try:
# PyPy needs the .close() calls here, or sqlite gets twisted up:
# https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
self._execute(sql, parameters).close()
except DataError:
if not fail_ok:
raise
def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
"""Like execute, but returns the lastrowid."""
with self.execute(sql, parameters) as cur:
assert cur.lastrowid is not None
rowid: int = cur.lastrowid
if self.debug.should("sqldata"):
self.debug.write(f"Row id result: {rowid!r}")
return rowid
def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None:
"""Execute a statement and return the one row that results.
This is like execute(sql, parameters).fetchone(), except it is
correct in reading the entire result set. This will raise an
exception if more than one row results.
Returns a row, or None if there were no rows.
"""
with self.execute(sql, parameters) as cur:
rows = list(cur)
if len(rows) == 0:
return None
elif len(rows) == 1:
return cast(tuple[Any, ...], rows[0])
else:
raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor:
"""Same as :meth:`python:sqlite3.Connection.executemany`."""
if self.debug.should("sql"):
final = ":" if self.debug.should("sqldata") else ""
self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
if self.debug.should("sqldata"):
for i, row in enumerate(data):
self.debug.write(f"{i:4d}: {row!r}")
assert self.con is not None
try:
return self.con.executemany(sql, data)
except Exception:
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/coveragepy/coveragepy/issues/1010
return self.con.executemany(sql, data)
def executemany_void(self, sql: str, data: list[Any]) -> None:
"""Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
self._executemany(sql, data).close()
def executescript(self, script: str) -> None:
"""Same as :meth:`python:sqlite3.Connection.executescript`."""
if self.debug.should("sql"):
self.debug.write(
"Executing script with {} chars: {}".format(
len(script),
clipped_repr(script, 100),
)
)
assert self.con is not None
self.con.executescript(script).close()
def dump(self) -> str:
"""Return a multi-line string, the SQL dump of the database."""
assert self.con is not None
return "\n".join(self.con.iterdump())
| SqliteDb |
python | allegroai__clearml | examples/hyperdatasets/create_doc_entries.py | {
"start": 1035,
"end": 8337
} | class ____(DataSubEntry):
def __init__(self, name: str, text: str, path: str, url: str):
super().__init__(
name=name,
source=url,
metadata={"text": text, "path": path, "url": url},
)
def parse_args() -> argparse.Namespace:
description = textwrap.dedent(__doc__ or "").strip()
parser = argparse.ArgumentParser(
description=description or None,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--project", default="HyperDatasets Examples", help="ClearML project name")
parser.add_argument("--dataset-name", required=True, help="HyperDataset collection name")
parser.add_argument("--version-name", required=True, help="HyperDataset version name")
parser.add_argument("--description", default="Documentation HyperDataset", help="Dataset description")
parser.add_argument(
"--doc-url",
action="append",
help="Remote Markdown file to ingest (can be supplied multiple times)",
)
parser.add_argument("--limit", type=int, default=100, help="Maximum number of documents to ingest")
parser.add_argument("--embed", action="store_true", help="Generate embeddings for each document")
parser.add_argument(
"--vector-field",
help="Metadata field used to store the embedding vector",
)
parser.add_argument(
"--embedding-model",
default="sentence-transformers/all-MiniLM-L6-v2",
help="SentenceTransformer model name or path for embeddings",
)
parser.add_argument(
"--embedding-device",
default=None,
help="Optional device string passed to SentenceTransformer (e.g. 'cpu', 'cuda')",
)
parser.add_argument(
"--normalize",
action="store_true",
help="L2-normalize generated embeddings",
)
return parser.parse_args()
DEFAULT_DOC_URLS = [
"https://github.com/clearml/clearml-docs/blob/main/docs/build_interactive_models.md",
"https://github.com/clearml/clearml-docs/blob/main/docs/clearml_agent.md",
"https://github.com/clearml/clearml-docs/blob/main/docs/community.md",
"https://github.com/clearml/clearml-docs/blob/main/docs/custom_apps.md",
"https://github.com/clearml/clearml-docs/blob/main/docs/deploying_models.md",
]
def load_markdown(path: Path) -> Tuple[str, str]:
text = path.read_text(encoding="utf-8", errors="ignore")
title = path.stem
for line in text.splitlines():
stripped = line.strip().lstrip("# ")
if stripped:
title = stripped
break
return title, text
def maybe_encode_embeddings(
documents: List[dict],
*,
model_name: str,
device: Optional[str],
normalize: bool,
) -> Optional[List[List[float]]]:
try:
from sentence_transformers import SentenceTransformer
except ImportError as exc: # pragma: no cover - optional dependency
raise RuntimeError("sentence-transformers is required for --embed") from exc
model = SentenceTransformer(model_name, device=device)
embeddings = model.encode([doc["content"] for doc in documents], normalize_embeddings=normalize)
return [list(map(float, emb)) for emb in embeddings]
def build_entries(
documents: List[dict],
embeddings: Optional[List[List[float]]],
vector_field: Optional[str],
) -> Tuple[List[DataEntry], Optional[int]]:
entries: List[DataEntry] = []
vector_dims: Optional[int] = None
for idx, doc in enumerate(documents):
metadata = {
"title": doc["title"],
"path": doc["relative_path"],
"url": doc["url"],
"size_bytes": len(doc["content"].encode("utf-8")),
"snippet": doc["snippet"],
}
entry = DataEntry(metadata=metadata)
entry.add_sub_entries(
[
MarkdownDataSubEntry(
name="document",
text=doc["content"],
path=doc["relative_path"],
url=doc["url"],
)
]
)
if embeddings:
if not vector_field:
raise ValueError("--vector-field must be provided when --embed is used")
vector = embeddings[idx]
entry.set_vector(vector, metadata_field=vector_field)
if vector_dims is None:
vector_dims = len(vector)
elif vector_dims != len(vector):
raise ValueError("All embedding vectors must share the same dimensionality")
entries.append(entry)
return entries, vector_dims
def main() -> None:
args = parse_args()
documents: List[dict] = []
urls = args.doc_url or DEFAULT_DOC_URLS
for url in urls:
try:
local_path = Path(StorageManager.get_local_copy(remote_url=url))
except Exception as exc:
raise RuntimeError(f"Failed downloading {url}: {exc}")
title, content = load_markdown(local_path)
snippet = textwrap.shorten(content.replace("\n", " "), width=240, placeholder="…")
documents.append(
{
"title": title,
"content": content,
"relative_path": local_path.name,
"snippet": snippet,
"url": url,
}
)
if args.limit and len(documents) >= args.limit:
break
if not documents:
raise ValueError(f"No Markdown files found under {docs_dir}")
embeddings = None
if args.embed:
if not args.vector_field:
raise ValueError("--vector-field must be provided when --embed is used")
embeddings = maybe_encode_embeddings(
documents,
model_name=args.embedding_model,
device=args.embedding_device,
normalize=args.normalize,
)
entries, vector_dims = build_entries(documents, embeddings, args.vector_field)
field_mappings = None
if vector_dims:
field_path = args.vector_field if args.vector_field.startswith("meta.") else f"meta.{args.vector_field}"
# ClearML vector field mapping so the backend knows about the dense embedding metadata
field_mappings = {
field_path: {
"type": "dense_vector",
"element_type": "float",
"dims": vector_dims,
}
}
# ClearML HyperDataset version handle (creates the version if needed)
dataset = HyperDataset(
project_name=args.project,
dataset_name=args.dataset_name,
version_name=args.version_name,
description=args.description,
field_mappings=field_mappings,
)
# Upload the assembled entries so ClearML manages storage/indexing
errors = dataset.add_data_entries(entries, upload_local_files_destination=None, force_upload=True)
if errors.get("register"):
raise RuntimeError(f"Failed registering entries: {errors['register']}")
print(
"Created HyperDataset version: project={project} dataset={dataset} version={version}".format(
project=dataset.project_id,
dataset=dataset.dataset_id,
version=dataset.version_id,
)
)
if __name__ == "__main__":
main()
| MarkdownDataSubEntry |
python | ray-project__ray | python/ray/util/collective/collective_group/nixl_backend.py | {
"start": 229,
"end": 5367
} | class ____:
"""Backend implementation for NIXL tensor transport.
This class provides functionality for transferring tensors using NIXL. It handles
initialization of the NIXL agent, receiving tensors, and managing NIXL metadata.
"""
def __init__(self):
"""Initialize the NIXL backend.
Creates a NIXL agent with UCX backend.
"""
agent_config = nixl_agent_config(backends=["UCX"])
ctx = ray.get_runtime_context()
actor_id = ctx.get_actor_id()
if actor_id is None:
# If the actor id is None, it means the current process is a driver.
import uuid
actor_id = f"RAY-DRIVER-{uuid.uuid4()}"
self._nixl_agent = nixl_agent(actor_id, agent_config)
self._aborted_transfer_obj_ids = set()
self._aborted_transfer_obj_ids_lock = threading.Lock()
@classmethod
def backend(cls):
"""Get the backend type.
Returns:
Backend.NIXL: The backend type enum value for NIXL.
"""
return Backend.NIXL
def recv(
self,
tensors: List["torch.Tensor"],
obj_id: str,
nixl_serialized_descs: bytes,
remote_nixl_agent_meta: bytes,
):
"""Receive tensors from a remote NIXL agent.
Args:
tensors: List of tensors to receive into.
obj_id: The object ID for related GPU object.
nixl_serialized_descs: Serialized NIXL descriptors for the remote tensors.
remote_nixl_agent_meta: Metadata about the remote NIXL agent.
Raises:
RuntimeError: If the NIXL transfer enters an error state.
"""
with self._aborted_transfer_obj_ids_lock:
if obj_id in self._aborted_transfer_obj_ids:
self._aborted_transfer_obj_ids.remove(obj_id)
raise RuntimeError(f"NIXL transfer aborted for object id: {obj_id}")
local_descs = None
remote_name = None
xfer_handle = None
try:
nixl_agent = self._nixl_agent
remote_descs = nixl_agent.deserialize_descs(nixl_serialized_descs)
local_descs = nixl_agent.register_memory(tensors)
remote_name = nixl_agent.add_remote_agent(remote_nixl_agent_meta)
xfer_handle = nixl_agent.initialize_xfer(
# "UUID" here is just a placeholder, can be any bytes, but without it,
# nixl will fail to transfer multiple times.
"READ",
local_descs.trim(),
remote_descs,
remote_name,
"UUID",
)
state = nixl_agent.transfer(xfer_handle)
if state == "ERR":
raise RuntimeError("NIXL transfer got to Error state.")
# Since current nixl does not provide a better way, we need to check the state of
# the transfer continuously.
while True:
state = nixl_agent.check_xfer_state(xfer_handle)
if state == "ERR":
raise RuntimeError("NIXL transfer got to Error state.")
if state == "PROC":
with self._aborted_transfer_obj_ids_lock:
if obj_id in self._aborted_transfer_obj_ids:
self._aborted_transfer_obj_ids.remove(obj_id)
raise RuntimeError(
f"NIXL transfer aborted for object id: {obj_id}"
)
time.sleep(0.001) # Avoid busy waiting
elif state == "DONE":
break
finally:
# We could raise errors or NIXL could raise errors like NIXL_ERR_REMOTE_DISCONNECT,
# so doing best effort cleanup.
with self._aborted_transfer_obj_ids_lock:
self._aborted_transfer_obj_ids.discard(obj_id)
if xfer_handle:
nixl_agent.release_xfer_handle(xfer_handle)
if remote_name:
nixl_agent.remove_remote_agent(remote_name)
if local_descs:
nixl_agent.deregister_memory(local_descs)
def get_nixl_metadata(
self, tensors: List["torch.Tensor"]
) -> Tuple[Any, bytes, bytes]:
"""Get NIXL metadata for a set of tensors.
Args:
tensors: List of tensors to get metadata for.
Returns:
tuple: A tuple containing:
- Serialized NIXL descriptors for the tensors
- Metadata about this NIXL agent
"""
nixl_agent = self._nixl_agent
reg_descs = nixl_agent.register_memory(tensors)
xfer_descs = reg_descs.trim()
return (
reg_descs,
nixl_agent.get_serialized_descs(xfer_descs),
nixl_agent.get_agent_metadata(),
)
def deregister_memory(self, descs: Any):
self._nixl_agent.deregister_memory(descs)
def abort(self, obj_id: str):
with self._aborted_transfer_obj_ids_lock:
self._aborted_transfer_obj_ids.add(obj_id)
| NixlBackend |
python | apache__thrift | test/crossrunner/test.py | {
"start": 893,
"end": 3454
} | class ____(object):
def __init__(self, kind, name, protocol, transport, socket, workdir, stop_signal, command, env=None,
extra_args=[], extra_args2=[], join_args=False, **kwargs):
self.kind = kind
self.name = name
self.protocol = protocol
self.transport = transport
self.socket = socket
self.workdir = workdir
self.stop_signal = stop_signal
self.command = None
self._base_command = self._fix_cmd_path(command)
if env:
self.env = copy.copy(os.environ)
self.env.update(env)
else:
self.env = os.environ
self._extra_args = extra_args
self._extra_args2 = extra_args2
self._join_args = join_args
def _fix_cmd_path(self, cmd):
# if the arg is a file in the current directory, make it path
def abs_if_exists(arg):
p = os.path.join(self.workdir, arg)
return p if os.path.exists(p) else arg
if cmd[0] == 'python':
cmd[0] = sys.executable
else:
cmd[0] = abs_if_exists(cmd[0])
return cmd
def _socket_args(self, socket, port):
support_socket_activation = self.kind == 'server' and sys.platform != "win32"
return {
'ip-ssl': ['--ssl'],
'domain': ['--domain-socket=%s' % domain_socket_path(port)],
'domain-socketactivated': (['--emulate-socketactivation'] if support_socket_activation else []) + ['--domain-socket=%s' % domain_socket_path(port)],
'abstract': ['--abstract-namespace', '--domain-socket=%s' % domain_socket_path(port)],
}.get(socket, None)
def _transport_args(self, transport):
return {
'zlib': ['--zlib'],
}.get(transport, None)
def build_command(self, port):
cmd = copy.copy(self._base_command)
args = copy.copy(self._extra_args2)
args.append('--protocol=' + self.protocol)
args.append('--transport=' + self.transport)
transport_args = self._transport_args(self.transport)
if transport_args:
args += transport_args
socket_args = self._socket_args(self.socket, port)
if socket_args:
args += socket_args
args.append('--port=%d' % port)
if self._join_args:
cmd.append('%s' % " ".join(args))
else:
cmd.extend(args)
if self._extra_args:
cmd.extend(self._extra_args)
self.command = cmd
return self.command
| TestProgram |
python | pypa__pip | src/pip/_vendor/rich/theme.py | {
"start": 155,
"end": 2570
} | class ____:
"""A container for style information, used by :class:`~rich.console.Console`.
Args:
styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles.
inherit (bool, optional): Inherit default styles. Defaults to True.
"""
styles: Dict[str, Style]
def __init__(
self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True
):
self.styles = DEFAULT_STYLES.copy() if inherit else {}
if styles is not None:
self.styles.update(
{
name: style if isinstance(style, Style) else Style.parse(style)
for name, style in styles.items()
}
)
@property
def config(self) -> str:
"""Get contents of a config file for this theme."""
config = "[styles]\n" + "\n".join(
f"{name} = {style}" for name, style in sorted(self.styles.items())
)
return config
@classmethod
def from_file(
cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True
) -> "Theme":
"""Load a theme from a text mode file.
Args:
config_file (IO[str]): An open conf file.
source (str, optional): The filename of the open file. Defaults to None.
inherit (bool, optional): Inherit default styles. Defaults to True.
Returns:
Theme: A New theme instance.
"""
config = configparser.ConfigParser()
config.read_file(config_file, source=source)
styles = {name: Style.parse(value) for name, value in config.items("styles")}
theme = Theme(styles, inherit=inherit)
return theme
@classmethod
def read(
cls, path: str, inherit: bool = True, encoding: Optional[str] = None
) -> "Theme":
"""Read a theme from a path.
Args:
path (str): Path to a config file readable by Python configparser module.
inherit (bool, optional): Inherit default styles. Defaults to True.
encoding (str, optional): Encoding of the config file. Defaults to None.
Returns:
Theme: A new theme instance.
"""
with open(path, encoding=encoding) as config_file:
return cls.from_file(config_file, source=path, inherit=inherit)
| Theme |
python | cython__cython | runtests.py | {
"start": 58849,
"end": 61112
} | class ____(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
self.runAbi3AuditTest()
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
if self.test_selector:
filter_test_suite(tests, self.test_selector)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_single_test(result, run_test)
def run_single_test(result, run_func):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
| CythonRunTestCase |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 9238,
"end": 9607
} | class ____(Static, can_focus=True):
"""A widget that has its own bindings for the movement keys."""
BINDINGS = AppKeyRecorder.make_bindings("local_")
async def action_local_record(self, key: str) -> None:
# Sneaky forward reference. Just for the purposes of testing.
await self.app.action_record(f"locally_{key}")
| FocusableWidgetWithBindings |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 48838,
"end": 50180
} | class ____(stages.Executable):
__slots__ = ["xla_executable", "_unsafe_call", "build_unsafe_call",
"fingerprint", "in_avals", "_unloaded_executable"]
def __init__(self, xla_executable, build_unsafe_call, fingerprint,
in_avals,
unloaded_executable: UnloadedPmapExecutable):
self.xla_executable = xla_executable
self._unsafe_call = None
self.build_unsafe_call = build_unsafe_call
self.fingerprint = fingerprint
self.in_avals = in_avals
self._unloaded_executable = unloaded_executable
@property
def unsafe_call(self) -> Callable[..., Any]:
if self._unsafe_call is None:
self._unsafe_call = self.build_unsafe_call()
return self._unsafe_call # type: ignore
# -- stages.Executable overrides
def xla_extension_executable(self):
return self.xla_executable
@profiler.annotate_function
def call(self, *args):
# TODO(frostig): do we need to check sharding and sharded avals?
arg_avals = map(core.abstractify, args)
check_arg_avals_for_call(self.in_avals, arg_avals,
self._unloaded_executable.jaxpr_debug_info)
return self.unsafe_call(*args) # pylint: disable=not-callable
def _get_pmap_sharding(devices, specs):
return [sharding_impls.PmapSharding(devices, spec) for spec in specs]
| PmapExecutable |
python | PyCQA__pylint | tests/functional/o/overridden_final_method_py38.py | {
"start": 447,
"end": 592
} | class ____:
create_final_method = True
if create_final_method:
@final
def my_method(self):
pass
| BaseConditional |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/lexers/base.py | {
"start": 1736,
"end": 2350
} | class ____(Lexer):
"""
Lexer class that can dynamically returns any Lexer.
:param get_lexer: Callable that returns a :class:`.Lexer` instance.
"""
def __init__(self, get_lexer: Callable[[], Lexer | None]) -> None:
self.get_lexer = get_lexer
self._dummy = SimpleLexer()
def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
lexer = self.get_lexer() or self._dummy
return lexer.lex_document(document)
def invalidation_hash(self) -> Hashable:
lexer = self.get_lexer() or self._dummy
return id(lexer)
| DynamicLexer |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/reconciliation.py | {
"start": 26938,
"end": 34866
} | class ____(AirbyteInstanceCacheableAssetsDefinition):
def __init__(
self,
airbyte_resource_def: AirbyteResource,
key_prefix: Sequence[str],
create_assets_for_normalization_tables: bool,
connection_meta_to_group_fn: Optional[Callable[[AirbyteConnectionMetadata], Optional[str]]],
connections: Iterable[AirbyteConnection],
connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],
connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],
connection_to_freshness_policy_fn: Optional[
Callable[[AirbyteConnectionMetadata], Optional[LegacyFreshnessPolicy]]
],
):
defined_conn_names = {conn.name for conn in connections}
super().__init__(
airbyte_resource_def=airbyte_resource_def,
workspace_id=None,
key_prefix=key_prefix,
create_assets_for_normalization_tables=create_assets_for_normalization_tables,
connection_meta_to_group_fn=connection_meta_to_group_fn,
connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,
connection_filter=lambda conn: conn.name in defined_conn_names,
connection_to_asset_key_fn=connection_to_asset_key_fn,
connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,
)
self._connections: list[AirbyteConnection] = list(connections)
def _get_connections(self) -> Sequence[tuple[str, AirbyteConnectionMetadata]]:
diff = reconcile_config(self._airbyte_instance, self._connections, dry_run=True)
if isinstance(diff, ManagedElementDiff) and not diff.is_empty():
raise ValueError(
f"Airbyte connections are not in sync with provided configuration, diff:\n{diff!s}"
)
elif isinstance(diff, ManagedElementError):
raise ValueError(f"Error checking Airbyte connections: {diff}")
return super()._get_connections()
@beta
@deprecated(breaking_version="2.0", additional_warn_text=MANAGED_ELEMENTS_DEPRECATION_MSG)
def load_assets_from_connections(
airbyte: Union[AirbyteResource, ResourceDefinition],
connections: Iterable[AirbyteConnection],
key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
create_assets_for_normalization_tables: bool = True,
connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = clean_name,
connection_meta_to_group_fn: Optional[
Callable[[AirbyteConnectionMetadata], Optional[str]]
] = None,
io_manager_key: Optional[str] = None,
connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,
connection_to_asset_key_fn: Optional[
Callable[[AirbyteConnectionMetadata, str], AssetKey]
] = None,
connection_to_freshness_policy_fn: Optional[
Callable[[AirbyteConnectionMetadata], Optional[LegacyFreshnessPolicy]]
] = None,
) -> CacheableAssetsDefinition:
"""Loads Airbyte connection assets from a configured AirbyteResource instance, checking against a list of AirbyteConnection objects.
This method will raise an error on repo load if the passed AirbyteConnection objects are not in sync with the Airbyte instance.
Args:
airbyte (Union[AirbyteResource, ResourceDefinition]): An AirbyteResource configured with the appropriate connection
details.
connections (Iterable[AirbyteConnection]): A list of AirbyteConnection objects to build assets for.
key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.
create_assets_for_normalization_tables (bool): If True, assets will be created for tables
created by Airbyte's normalization feature. If False, only the destination tables
will be created. Defaults to True.
connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset
group name for a given Airbyte connection name. If None, no groups will be created. Defaults
to a basic sanitization function.
connection_meta_to_group_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[str]]]): Function which
returns an asset group name for a given Airbyte connection metadata. If None and connection_to_group_fn
is None, no groups will be created. Defaults to None.
io_manager_key (Optional[str]): The IO manager key to use for all assets. Defaults to "io_manager".
Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.
connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an
IO manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,
the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".
connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which
takes in connection metadata and table name and returns an asset key for the table. If None, the default asset
key is based on the table name. Any asset key prefix will be applied to the output of this function.
connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]): Optional function which
takes in connection metadata and returns a freshness policy for the connection. If None, no freshness policy will be applied.
**Examples:**
.. code-block:: python
from dagster_airbyte import (
AirbyteConnection,
AirbyteResource,
load_assets_from_connections,
)
airbyte_instance = AirbyteResource(
host: "localhost",
port: "8000",
)
airbyte_connections = [
AirbyteConnection(...),
AirbyteConnection(...)
]
airbyte_assets = load_assets_from_connections(airbyte_instance, airbyte_connections)
"""
if isinstance(key_prefix, str):
key_prefix = [key_prefix]
key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)
check.invariant(
not io_manager_key or not connection_to_io_manager_key_fn,
"Cannot specify both io_manager_key and connection_to_io_manager_key_fn",
)
if not connection_to_io_manager_key_fn:
connection_to_io_manager_key_fn = lambda _: io_manager_key
check.invariant(
not connection_meta_to_group_fn
or not connection_to_group_fn
or connection_to_group_fn == clean_name,
"Cannot specify both connection_meta_to_group_fn and connection_to_group_fn",
)
if not connection_meta_to_group_fn and connection_to_group_fn:
connection_meta_to_group_fn = lambda meta: connection_to_group_fn(meta.name)
return AirbyteManagedElementCacheableAssetsDefinition(
airbyte_resource_def=(
airbyte
if isinstance(airbyte, AirbyteResource)
else airbyte(build_init_resource_context())
),
key_prefix=key_prefix,
create_assets_for_normalization_tables=check.bool_param(
create_assets_for_normalization_tables, "create_assets_for_normalization_tables"
),
connection_meta_to_group_fn=check.opt_callable_param(
connection_meta_to_group_fn, "connection_meta_to_group_fn"
),
connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,
connections=check.iterable_param(connections, "connections", of_type=AirbyteConnection),
connection_to_asset_key_fn=connection_to_asset_key_fn,
connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,
)
| AirbyteManagedElementCacheableAssetsDefinition |
python | bokeh__bokeh | tests/unit/bokeh/test_objects.py | {
"start": 4115,
"end": 4166
} | class ____(Model):
some = Int(default=0)
| SomeModel |
python | donnemartin__interactive-coding-challenges | online_judges/island_perimeter/test_island_perimeter.py | {
"start": 18,
"end": 693
} | class ____(unittest.TestCase):
def test_island_perimeter(self):
solution = Solution()
self.assertRaises(TypeError, solution.island_perimeter, None)
data = [[1, 0]]
expected = 4
self.assertEqual(solution.island_perimeter(data), expected)
data = [[0, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[1, 1, 0, 0]]
expected = 16
self.assertEqual(solution.island_perimeter(data), expected)
print('Success: test_island_perimeter')
def main():
test = TestIslandPerimeter()
test.test_island_perimeter()
if __name__ == '__main__':
main()
| TestIslandPerimeter |
python | getsentry__sentry | src/sentry/analytics/events/relocation_forked.py | {
"start": 74,
"end": 301
} | class ____(analytics.Event):
creator_id: int
owner_id: int
uuid: str
from_org_slug: str
requesting_region_name: str
replying_region_name: str
analytics.register(RelocationForkedEvent)
| RelocationForkedEvent |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 50740,
"end": 53918
} | class ____(_fixtures.FixtureTest):
"""test that backrefs don't force save-update cascades to occur
when the cascade initiated from the forwards side."""
def test_unidirectional_cascade_o2m(self):
User, Order, users, orders = (
self.classes.User,
self.classes.Order,
self.tables.users,
self.tables.orders,
)
self.mapper_registry.map_imperatively(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
orders=relationship(
Order, backref=backref("user", cascade=None)
)
),
)
sess = fixture_session()
o1 = Order()
sess.add(o1)
u1 = User(orders=[o1])
assert u1 not in sess
assert o1 in sess
sess.expunge_all()
o1 = Order()
u1 = User(orders=[o1])
sess.add(o1)
assert u1 not in sess
assert o1 in sess
def test_unidirectional_cascade_m2o(self):
User, Order, users, orders = (
self.classes.User,
self.classes.Order,
self.tables.users,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"user": relationship(
User, backref=backref("orders", cascade=None)
)
},
)
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User()
sess.add(u1)
o1 = Order()
o1.user = u1
assert o1 not in sess
assert u1 in sess
sess.expunge_all()
u1 = User()
o1 = Order()
o1.user = u1
sess.add(u1)
assert o1 not in sess
assert u1 in sess
def test_unidirectional_cascade_m2m(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword,
secondary=item_keywords,
cascade="none",
backref="items",
)
},
)
self.mapper_registry.map_imperatively(Keyword, keywords)
sess = fixture_session()
i1 = Item()
k1 = Keyword()
sess.add(i1)
i1.keywords.append(k1)
assert i1 in sess
assert k1 not in sess
sess.expunge_all()
i1 = Item()
k1 = Keyword()
sess.add(i1)
k1.items.append(i1)
assert i1 in sess
assert k1 not in sess
@testing.combinations(
(
"legacy_style",
True,
),
(
"new_style",
False,
),
argnames="name, _legacy_inactive_history_style",
id_="sa",
)
| NoSaveCascadeBackrefTest |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 80793,
"end": 81354
} | class ____(LinprogSimplexTests):
def setup_method(self):
self.options = {'bland': True}
def test_bug_5400(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError):
with pytest.warns(OptimizeWarning):
super().test_bug_8174()
| TestLinprogSimplexBland |
python | streamlit__streamlit | lib/tests/streamlit/data_test_cases.py | {
"start": 4104,
"end": 33786
} | class ____(enum.Enum):
NUMBER_INPUT = "st.number_input"
TEXT_AREA = "st.text_area"
TEXT_INPUT = "st.text_input"
def data_generator():
yield "st.number_input"
yield "st.text_area"
yield "st.text_input"
SHARED_TEST_CASES: list[tuple[str, Any, CaseMetadata]] = [
###################################
####### Native Python Types #######
###################################
(
"None",
None,
CaseMetadata(0, 0, DataFormat.EMPTY, [], "markdown", False, pd.DataFrame),
),
(
"Empty list",
[],
CaseMetadata(0, 0, DataFormat.LIST_OF_VALUES, [], "json", False),
),
(
"Empty tuple",
(),
CaseMetadata(0, 0, DataFormat.TUPLE_OF_VALUES, [], "markdown", False),
),
(
"Empty dict",
{},
CaseMetadata(0, 0, DataFormat.KEY_VALUE_DICT, [], "json", False),
),
(
"Empty set",
set(),
CaseMetadata(0, 0, DataFormat.SET_OF_VALUES, [], "markdown", False),
),
(
"List[str]",
["st.text_area", "st.number_input", "st.text_input"],
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["st.text_area", "st.number_input", "st.text_input"],
"json",
False,
),
),
(
"List[int]",
[1, 2, 3],
CaseMetadata(3, 1, DataFormat.LIST_OF_VALUES, [1, 2, 3], "json", False),
),
(
"List[float]",
[1.1, 2.2, 3.3],
CaseMetadata(3, 1, DataFormat.LIST_OF_VALUES, [1.1, 2.2, 3.3], "json", False),
),
(
"List[bool]",
[True, False, True],
CaseMetadata(
3, 1, DataFormat.LIST_OF_VALUES, [True, False, True], "json", False
),
),
(
"List[None]",
[None, None, None],
CaseMetadata(
3, 1, DataFormat.LIST_OF_VALUES, [None, None, None], "json", False
),
),
(
"List[date]",
[date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)],
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
[date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)],
"json",
False,
),
),
(
"Set[str]",
# Set does not have a stable order across different Python version.
# Therefore, we are only testing this with one item.
{"st.number_input", "st.number_input"}, # noqa: B033
CaseMetadata(
1, 1, DataFormat.SET_OF_VALUES, ["st.number_input"], "markdown", False
),
),
(
"Tuple[str]",
("st.text_area", "st.number_input", "st.text_input"),
CaseMetadata(
3,
1,
DataFormat.TUPLE_OF_VALUES,
["st.text_area", "st.number_input", "st.text_input"],
"markdown",
False,
),
),
(
"Frozenset[str]",
# Set does not have a stable order across different Python version.
# Therefore, we are only testing this with one item.
frozenset({"st.number_input", "st.number_input"}), # noqa: B033
CaseMetadata(
1,
1,
DataFormat.SET_OF_VALUES,
["st.number_input"],
"markdown",
False,
set,
),
),
(
"Empty frozenset",
frozenset(),
CaseMetadata(0, 0, DataFormat.SET_OF_VALUES, [], "markdown", False, set),
),
(
"Range",
range(3),
CaseMetadata(
3, 1, DataFormat.LIST_OF_VALUES, [0, 1, 2], "markdown", False, list
),
),
(
"Dict Keys",
{
"st.number_input": "number",
"st.text_area": "text",
"st.text_input": "text",
}.keys(),
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["st.number_input", "st.text_area", "st.text_input"],
"json",
False,
list,
),
),
(
"Dict Values",
{
"st.number_input": "number",
"st.text_area": "text",
"st.text_input": "text",
}.values(),
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["number", "text", "text"],
"json",
False,
list,
),
),
(
"Dict Items",
{
"st.number_input": "number",
"st.text_area": "text",
"st.text_input": "text",
}.items(),
CaseMetadata(
3,
2,
DataFormat.LIST_OF_ROWS,
[
("st.number_input", "number"),
("st.text_area", "text"),
("st.text_input", "text"),
],
"json",
False,
list,
),
),
(
"collections.OrderedDict",
OrderedDict(
[
("st.number_input", "number"),
("st.text_area", "text"),
]
),
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", "st.text_area"],
"json",
False,
dict,
),
),
(
"collections.defaultdict",
defaultdict(
lambda: "Not Present",
{"st.text_area": "widget", "st.markdown": "element"},
),
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.text_area", "st.markdown"],
"json",
False,
dict,
),
),
(
"collections.Counter",
Counter({"st.number_input": 4, "st.text_area": 2}),
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", "st.text_area"],
"json",
False,
dict,
),
),
(
"collections.deque",
deque(["st.number_input", "st.text_area", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["st.number_input", "st.text_area", "st.text_input"],
"markdown",
False,
list,
),
),
(
"collections.ChainMap",
ChainMap(
{"st.number_input": "number", "st.text_area": "text"},
{"st.text_input": "text"},
),
CaseMetadata(
3,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", "st.text_area", "st.text_input"],
"json",
False,
dict,
),
),
(
"collections.UserList",
UserList(["st.number_input", "st.text_area", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["st.number_input", "st.text_area", "st.text_input"],
"json",
False,
list,
),
),
(
"Dataclass",
ElementDataClass("st.number_input", is_widget=True, usage=0.32),
CaseMetadata(
3,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", True, 0.32],
"help",
False,
dict,
),
),
(
"TypedDict",
ElementTypedDict(name="st.number_input", is_widget=True, usage=0.32),
CaseMetadata(
3,
1,
DataFormat.KEY_VALUE_DICT,
["name", "is_widget", "usage"],
"json",
False,
dict,
),
),
(
"NamedTuple",
ElementNamedTuple("st.number_input", is_widget=True, usage=0.32),
CaseMetadata(
3,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", True, 0.32],
"json",
False,
dict,
),
),
(
"String Enum",
StrTestEnum,
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
["st.number_input", "st.text_area", "st.text_input"],
"help",
False,
list,
),
),
(
"Enum",
TestEnum,
CaseMetadata(
3,
1,
DataFormat.LIST_OF_VALUES,
[TestEnum.NUMBER_INPUT, TestEnum.TEXT_AREA, TestEnum.TEXT_INPUT],
"help",
False,
list,
),
),
(
"Generator Function",
data_generator,
CaseMetadata(
3,
1,
DataFormat.UNKNOWN,
["st.number_input", "st.text_area", "st.text_input"],
"write_stream",
True,
),
),
(
"Empty column value mapping",
{"name": [], "type": []},
CaseMetadata(
0, 2, DataFormat.COLUMN_VALUE_MAPPING, ["name", "type"], "json", False
),
),
(
"array.array",
array.array("i", [1, 2, 3]),
CaseMetadata(
3, 1, DataFormat.LIST_OF_VALUES, [1, 2, 3], "markdown", False, list
),
),
(
"MappingProxyType",
MappingProxyType({"st.text_area": "widget", "st.markdown": "element"}),
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.text_area", "st.markdown"],
"json",
False,
dict,
),
),
(
"UserDict",
UserDictExample({"st.text_area": "widget", "st.markdown": "element"}),
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.text_area", "st.markdown"],
"json",
False,
dict,
),
),
(
"List of rows", # List[list[scalar]]
[["st.text_area", "widget"], ["st.markdown", "element"]],
CaseMetadata(
2,
2,
DataFormat.LIST_OF_ROWS,
[["st.text_area", "widget"], ["st.markdown", "element"]],
"json",
False,
),
),
(
"List of records", # List[Dict[str, Scalar]]
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
],
CaseMetadata(
2,
2,
DataFormat.LIST_OF_RECORDS,
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
],
"json",
False,
),
),
(
"Column-index mapping", # ({column: {index: value}})
{
"type": {"st.text_area": "widget", "st.markdown": "element"},
"usage": {"st.text_area": 4.92, "st.markdown": 47.22},
},
CaseMetadata(
2,
2,
DataFormat.COLUMN_INDEX_MAPPING,
["type", "usage"],
"json",
False,
),
),
(
"Column-value mapping", # ({column: List[values]}})
{
"name": ["st.text_area", "st.markdown"],
"type": ["widget", "element"],
},
CaseMetadata(
2,
2,
DataFormat.COLUMN_VALUE_MAPPING,
["name", "type"],
"json",
False,
),
),
(
"Column-series mapping", # ({column: Series(values)})
{
"name": pd.Series(["st.text_area", "st.markdown"], name="name"),
"type": pd.Series(["widget", "element"], name="type"),
},
CaseMetadata(
2,
2,
DataFormat.COLUMN_SERIES_MAPPING,
["name", "type"],
"dataframe",
False,
),
),
(
"Key-value dict", # ({index: value})
{"st.text_area": "widget", "st.markdown": "element"},
CaseMetadata(
2,
1,
DataFormat.KEY_VALUE_DICT,
["st.text_area", "st.markdown"],
"json",
False,
),
),
###################################
########## Pandas Types ###########
###################################
(
"Empty pd.Dataframe",
pd.DataFrame(),
CaseMetadata(0, 0, DataFormat.PANDAS_DATAFRAME, [], "dataframe", False),
),
(
"Empty pd.Dataframe with columns",
pd.DataFrame(
columns=["name", "type"], index=pd.RangeIndex(start=0, step=1)
), # Explicitly set the range index to have the same behavior across versions
CaseMetadata(0, 2, DataFormat.PANDAS_DATAFRAME, [], "dataframe", False),
),
(
"pd.Dataframe",
pd.DataFrame(["st.text_area", "st.markdown"]),
CaseMetadata(
2,
1,
DataFormat.PANDAS_DATAFRAME,
["st.text_area", "st.markdown"],
"dataframe",
False,
),
),
(
"pd.Series[str]",
pd.Series(
["st.text_area", "st.number_input", "st.text_input"],
name="widgets",
),
CaseMetadata(
3,
1,
DataFormat.PANDAS_SERIES,
["st.text_area", "st.number_input", "st.text_input"],
"dataframe",
False,
),
),
(
"pd.Index",
pd.Index(["st.text_area", "st.markdown"]),
CaseMetadata(
2,
1,
DataFormat.PANDAS_INDEX,
["st.text_area", "st.markdown"],
"dataframe",
False,
pd.DataFrame,
),
),
(
"Pandas Styler",
pd.DataFrame(["st.text_area", "st.markdown"]).style,
CaseMetadata(
2,
1,
DataFormat.PANDAS_STYLER,
["st.text_area", "st.markdown"],
"dataframe",
False,
pd.DataFrame,
),
),
(
"pd.array",
pd.array(["st.number_input", "st.text_area", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.PANDAS_ARRAY,
["st.number_input", "st.text_area", "st.text_input"],
"dataframe",
False,
pd.DataFrame,
),
),
(
"pd.DatetimeIndex",
pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]),
CaseMetadata(
2,
1,
DataFormat.PANDAS_INDEX,
[
pd.Timestamp("2020-01-01 10:00:00+0000", tz="UTC"),
pd.Timestamp("2020-02-01 11:00:00+0000", tz="UTC"),
],
"dataframe",
False,
pd.DataFrame,
),
),
(
"pd.RangeIndex",
pd.RangeIndex(start=0, stop=3, step=1),
CaseMetadata(
3, 1, DataFormat.PANDAS_INDEX, [0, 1, 2], "dataframe", False, pd.DataFrame
),
),
###################################
########### Numpy Types ###########
###################################
(
"Empty np.array",
# For unknown reasons, pd.DataFrame initializes empty numpy arrays with a single column
np.ndarray(0),
CaseMetadata(0, 1, DataFormat.NUMPY_LIST, [], "dataframe", False),
),
(
"np.array[str]",
np.array(["st.text_area", "st.number_input", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.NUMPY_LIST,
["st.text_area", "st.number_input", "st.text_input"],
"dataframe",
False,
),
),
(
"np.array[int]",
np.array([1, 2, 3]),
CaseMetadata(3, 1, DataFormat.NUMPY_LIST, [1, 2, 3], "dataframe", False),
),
(
"np.array[list[scalar]]",
np.array(
[
["st.text_area", "widget"],
["st.markdown", "element"],
]
),
CaseMetadata(
2,
2,
DataFormat.NUMPY_MATRIX,
["st.text_area", "st.markdown"],
"dataframe",
False,
),
),
(
"np.array[list[str]]", # numpy matrix
np.array(
[
["st.text_area", "widget"],
["st.markdown", "element"],
]
),
CaseMetadata(
2,
2,
DataFormat.NUMPY_MATRIX,
["st.text_area", "st.markdown"],
"dataframe",
False,
),
),
###################################
########## Pyarrow Types ##########
###################################
(
"Pyarrow Table",
pa.Table.from_pandas(pd.DataFrame(["st.text_area", "st.markdown"])),
CaseMetadata(
2,
1,
DataFormat.PYARROW_TABLE,
["st.text_area", "st.markdown"],
"dataframe",
False,
),
),
(
"Pyarrow Array",
pa.array(["st.number_input", "st.text_area", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.PYARROW_ARRAY,
["st.number_input", "st.text_area", "st.text_input"],
"dataframe",
False,
),
),
###################################
##### Snowflake Types (Mocks) #####
###################################
(
"Snowpark DataFrame",
SnowparkDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.SNOWPARK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Snowpark Table",
SnowparkTable(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.SNOWPARK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Snowpark Row List",
[
SnowparkRow({"name": "st.text_area", "type": "widget"}),
SnowparkRow({"name": "st.markdown", "type": "element"}),
SnowparkRow({"name": "st.text_input", "type": "text"}),
],
CaseMetadata(
3,
2,
DataFormat.SNOWPARK_OBJECT,
["st.text_area", "st.markdown", "st.text_input"],
"dataframe",
False,
pd.DataFrame,
),
),
(
"Snowpandas DataFrame",
SnowpandasDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.SNOWPANDAS_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Snowpandas Series",
SnowpandasSeries(pd.Series(["st.text_area", "st.markdown"])),
CaseMetadata(
2,
1,
DataFormat.SNOWPANDAS_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Snowpandas Index",
SnowpandasIndex(
pd.Index(["st.text_area", "st.markdown"]),
),
CaseMetadata(
2,
1,
DataFormat.SNOWPANDAS_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Modin DataFrame",
ModinDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.MODIN_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Modin Series",
ModinSeries(pd.Series(["st.text_area", "st.markdown"])),
CaseMetadata(
2,
1,
DataFormat.MODIN_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
###################################
##### External Types (Mocks) ######
###################################
(
"Pyspark DataFrame",
PySparkDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.PYSPARK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Pyspark Connect DataFrame",
PySparkConnectDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.PYSPARK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Dask DataFrame",
DaskDataFrame(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.DASK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Dask Series",
DaskSeries(pd.Series(["st.text_area", "st.markdown"])),
CaseMetadata(
2,
1,
DataFormat.DASK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Dask Index",
DaskIndex(
pd.Index(["st.text_area", "st.markdown"]),
),
CaseMetadata(
2,
1,
DataFormat.DASK_OBJECT,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Ray Dataset",
RayDataset(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.RAY_DATASET,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
(
"Ray Materialized Dataset",
RayMaterializedDataset(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.RAY_DATASET,
["st.text_area", "st.markdown"],
"dataframe",
True,
pd.DataFrame,
),
),
]
###################################
###### Dataframe Interchange ######
###################################
if is_pandas_version_less_than("1.5.0") is False and pa.__version__ != "22.0.0":
# Ignoring pyarrow v22.0.0 since it has issues with the interchange protocol.
# This was fixed in: https://github.com/apache/arrow/pull/47977
SHARED_TEST_CASES.extend(
[
(
"Dataframe-interchange compatible",
CustomDataframe(
pd.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
)
),
CaseMetadata(
2,
2,
DataFormat.UNKNOWN,
["st.text_area", "st.markdown"],
"dataframe",
False,
None,
),
),
]
)
###################################
########### Polars Types ##########
###################################
try:
import polars as pl
SHARED_TEST_CASES.extend(
[
(
"Polars DataFrame",
pl.DataFrame(
[
{"name": "st.text_area", "type": "widget"},
{"name": "st.markdown", "type": "element"},
]
),
CaseMetadata(
2,
2,
DataFormat.POLARS_DATAFRAME,
["st.text_area", "st.markdown"],
"dataframe",
False,
),
),
(
"Polars Series",
pl.Series(["st.number_input", "st.text_area", "st.text_input"]),
CaseMetadata(
3,
1,
DataFormat.POLARS_SERIES,
["st.number_input", "st.text_area", "st.text_input"],
"dataframe",
False,
),
),
(
"Polars LazyFrame",
pl.LazyFrame(
{
"name": ["st.text_area", "st.markdown"],
"type": ["widget", "element"],
}
),
CaseMetadata(
2,
2,
DataFormat.POLARS_LAZYFRAME,
["st.text_area", "st.markdown"],
"dataframe",
True,
pl.DataFrame,
),
),
]
)
except ModuleNotFoundError:
print("Polars not installed. Skipping Polars dataframe integration tests.") # noqa: T201
###################################
########### Xarray Types ##########
###################################
try:
import xarray as xr
SHARED_TEST_CASES.extend(
[
(
"Xarray Dataset",
xr.Dataset.from_dataframe(
pd.DataFrame(
{
"name": ["st.text_area", "st.markdown"],
"type": ["widget", "element"],
}
)
),
CaseMetadata(
2,
2,
DataFormat.XARRAY_DATASET,
["name", "type"],
"dataframe",
False,
),
),
(
"Xarray DataArray",
xr.DataArray.from_series(
pd.Series(
["st.number_input", "st.text_area", "st.text_input"],
name="widgets",
)
),
CaseMetadata(
3,
1,
DataFormat.XARRAY_DATA_ARRAY,
["st.number_input", "st.text_area", "st.text_input"],
"dataframe",
False,
),
),
]
)
except ModuleNotFoundError:
print("Xarray not installed. Skipping Xarray dataframe integration tests.") # noqa: T201
###################################
########## Pydantic Types #########
###################################
try:
from pydantic import BaseModel
class ElementPydanticModel(BaseModel):
name: str
is_widget: bool
usage: float
SHARED_TEST_CASES.extend(
[
(
"Pydantic Model",
ElementPydanticModel(
name="st.number_input", is_widget=True, usage=0.32
),
CaseMetadata(
3,
1,
DataFormat.KEY_VALUE_DICT,
["st.number_input", True, 0.32],
"json",
False,
dict,
),
),
]
)
except ModuleNotFoundError:
print("Pydantic not installed. Skipping Pydantic dataframe tests.") # noqa: T201
| TestEnum |
python | great-expectations__great_expectations | great_expectations/data_context/types/base.py | {
"start": 11991,
"end": 15938
} | class ____(Schema):
class Meta:
unknown = INCLUDE
name = fields.String(required=False, allow_none=True)
class_name = fields.String(
required=False,
allow_none=True,
missing="Asset",
)
module_name = fields.String(
required=False,
all_none=True,
missing="great_expectations.datasource.data_connector.asset",
)
base_directory = fields.String(required=False, allow_none=True)
glob_directive = fields.String(required=False, allow_none=True)
pattern = fields.String(required=False, allow_none=True)
group_names = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True)
bucket = fields.String(required=False, allow_none=True)
prefix = fields.String(required=False, allow_none=True)
delimiter = fields.String(required=False, allow_none=True)
max_keys = fields.Integer(required=False, allow_none=True)
schema_name = fields.String(required=False, allow_none=True)
batch_spec_passthrough = fields.Dict(required=False, allow_none=True)
"""
Necessary addition for AWS Glue Data Catalog assets.
By using AWS Glue Data Catalog, we need to have both database and table names.
The partitions are optional, it must match the partitions defined in the table
and it is used to create batch identifiers that allows the validation of a single
partition. Example: if we have two partitions (year, month), specifying these would
create one batch id per combination of year and month. The connector gets the partition
values from the AWS Glue Data Catalog.
"""
database_name = fields.String(required=False, allow_none=True)
partitions = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True)
# Necessary addition for Cloud assets
table_name = fields.String(required=False, allow_none=True)
type = fields.String(required=False, allow_none=True)
batch_identifiers = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True)
data_asset_name_prefix = fields.String(required=False, allow_none=True)
data_asset_name_suffix = fields.String(required=False, allow_none=True)
include_schema_name = fields.Boolean(required=False, allow_none=True)
partitioner_method = fields.String(required=False, allow_none=True)
partitioner_kwargs = fields.Dict(required=False, allow_none=True)
sorters = fields.List(
cls_or_instance=fields.Nested(SorterConfigSchema, required=False, allow_none=True),
required=False,
allow_none=True,
)
sampling_method = fields.String(required=False, allow_none=True)
sampling_kwargs = fields.Dict(required=False, allow_none=True)
reader_options = fields.Dict(keys=fields.Str(), required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs) -> None:
pass
@pre_dump
def prepare_dump(self, data, **kwargs):
"""
Schemas in Spark Dataframes are defined as StructType, which is not serializable
This method calls the schema's jsonValue() method, which translates the object into a json
"""
# check whether spark exists
if (not pyspark.types) or (pyspark.types.StructType is None):
return data
batch_spec_passthrough_config = data.get("batch_spec_passthrough")
if batch_spec_passthrough_config:
reader_options: dict = batch_spec_passthrough_config.get("reader_options")
if reader_options:
schema = reader_options.get("schema")
if schema and pyspark.types and isinstance(schema, pyspark.types.StructType):
data["batch_spec_passthrough"]["reader_options"]["schema"] = schema.jsonValue()
return data
# noinspection PyUnusedLocal
@post_load
def make_asset_config(self, data, **kwargs):
return AssetConfig(**data)
| AssetConfigSchema |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/pydantic/types.py | {
"start": 15571,
"end": 17092
} | class ____(_fields.CustomValidator, Generic[SecretType]):
__slots__ = 'field_type', 'min_length', 'max_length', 'error_prefix'
def __init__(
self, field_type: Type[SecretField[SecretType]], min_length: int | None = None, max_length: int | None = None
) -> None:
self.field_type: Type[SecretField[SecretType]] = field_type
self.min_length = min_length
self.max_length = max_length
self.error_prefix: Literal['string', 'bytes'] = 'string' if field_type is SecretStr else 'bytes'
def __call__(self, __value: SecretField[SecretType] | SecretType, _: core_schema.ValidationInfo) -> Any:
if self.min_length is not None and len(__value) < self.min_length:
short_kind: core_schema.ErrorType = f'{self.error_prefix}_too_short' # type: ignore[assignment]
raise PydanticKnownError(short_kind, {'min_length': self.min_length})
if self.max_length is not None and len(__value) > self.max_length:
long_kind: core_schema.ErrorType = f'{self.error_prefix}_too_long' # type: ignore[assignment]
raise PydanticKnownError(long_kind, {'max_length': self.max_length})
if isinstance(__value, self.field_type):
return __value
else:
return self.field_type(__value) # type: ignore[arg-type]
def __pydantic_update_schema__(self, schema: core_schema.CoreSchema, **constraints: Any) -> None:
self._update_attrs(constraints, {'min_length', 'max_length'})
| SecretFieldValidator |
python | plotly__plotly.py | plotly/graph_objs/_deprecations.py | {
"start": 2076,
"end": 2786
} | class ____(list):
"""
plotly.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly.graph_objs.Frame
"""
def __init__(self, *args, **kwargs):
"""
plotly.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly.graph_objs.Frame
"""
warnings.warn(
"""plotly.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly.graph_objs.Frame
""",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
| Frames |
python | pypa__hatch | src/hatch/config/model.py | {
"start": 7765,
"end": 9848
} | class ____(LazilyParsedConfig):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._field_name = FIELD_TO_PARSE
self._field_path = FIELD_TO_PARSE
self._field_args = FIELD_TO_PARSE
@property
def name(self):
if self._field_name is FIELD_TO_PARSE:
if "name" in self.raw_data:
name = self.raw_data["name"]
if not isinstance(name, str):
self.raise_error("must be a string")
self._field_name = name
else:
self.raise_error("required field")
return self._field_name
@name.setter
def name(self, value):
self.raw_data["name"] = value
self._field_name = FIELD_TO_PARSE
@property
def path(self):
if self._field_path is FIELD_TO_PARSE:
if "path" in self.raw_data:
path = self.raw_data["path"]
if not isinstance(path, str):
self.raise_error("must be a string")
self._field_path = path
else:
self._field_path = self.raw_data["path"] = self.name
return self._field_path
@path.setter
def path(self, value):
self.raw_data["path"] = value
self._field_path = FIELD_TO_PARSE
@property
def args(self):
if self._field_args is FIELD_TO_PARSE:
if "args" in self.raw_data:
args = self.raw_data["args"]
if not isinstance(args, list):
self.raise_error("must be an array")
for i, entry in enumerate(args, 1):
if not isinstance(entry, str):
self.raise_error("must be a string", extra_steps=(str(i),))
self._field_args = args
else:
self._field_args = self.raw_data["args"] = []
return self._field_args
@args.setter
def args(self, value):
self.raw_data["args"] = value
self._field_args = FIELD_TO_PARSE
| ShellConfig |
python | django__django | tests/custom_pk/fields.py | {
"start": 60,
"end": 491
} | class ____:
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
def __hash__(self):
return hash(self.value)
| MyWrapper |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 7356,
"end": 9855
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DabDetr
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `DabDetrFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = DabDetrFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
# Modified from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->DabDetr
| DabDetrFrozenBatchNorm2d |
python | openai__openai-python | src/openai/types/responses/web_search_preview_tool_param.py | {
"start": 940,
"end": 1496
} | class ____(TypedDict, total=False):
type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
"""The type of the web search tool.
One of `web_search_preview` or `web_search_preview_2025_03_11`.
"""
search_context_size: Literal["low", "medium", "high"]
"""High level guidance for the amount of context window space to use for the
search.
One of `low`, `medium`, or `high`. `medium` is the default.
"""
user_location: Optional[UserLocation]
"""The user's location."""
| WebSearchPreviewToolParam |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/_legendgrouptitle.py | {
"start": 233,
"end": 3017
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour"
_path_str = "histogram2dcontour.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.histogram2dcontour.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_web_fetch_block_param.py | {
"start": 328,
"end": 631
} | class ____(TypedDict, total=False):
content: Required[BetaRequestDocumentBlockParam]
type: Required[Literal["web_fetch_result"]]
url: Required[str]
"""Fetched content URL"""
retrieved_at: Optional[str]
"""ISO 8601 timestamp when the content was retrieved"""
| BetaWebFetchBlockParam |
python | tensorflow__tensorflow | tensorflow/python/saved_model/registration/registration_saving_test.py | {
"start": 7720,
"end": 14788
} | class ____(test.TestCase):
@test_util.deprecated_graph_mode_only
def test_registered_saver_fails_in_saved_model_graph_mode(self):
with context.eager_mode():
p1 = Part([1, 4])
p2 = Part([2, 5])
p3 = Part([3, 6])
s = Stack([p1, p2, p3])
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(s, save_dir)
with self.assertRaisesRegex(
NotImplementedError,
"registered checkpoint saver is not supported in graph mode"):
load.load(save_dir)
def test_registered_saver_checkpoint(self):
p1 = Part([1, 4])
p2 = Part([2, 5])
p3 = Part([3, 6])
s = Stack([p1, p2, p3])
s2 = Stack([p3, p1, p2])
expected_value_s = s.value()
expected_value_s2 = s2.value()
ckpt_path = os.path.join(self.get_temp_dir(), "ckpt")
util.Checkpoint(s=s, s2=s2).write(ckpt_path)
del s, s2, p1, p2, p3
restore_s = Stack([Part([0, 0]) for _ in range(3)])
util.Checkpoint(s=restore_s).read(ckpt_path).expect_partial()
self.assertAllEqual(expected_value_s, restore_s.value())
util.Checkpoint(s2=restore_s).read(ckpt_path).expect_partial()
self.assertAllEqual(expected_value_s2, restore_s.value())
def test_compatible_with_v1_savedmodel(self):
p1 = Part([1, 4])
p2 = Part([2, 5])
p3 = Part([3, 6])
s = Stack([p1, p2, p3])
save_path = os.path.join(self.get_temp_dir(), "savedmodel")
@def_function.function(input_signature=[])
def serve():
return {"value": s.value()}
exported_value = serve()["value"]
save.save(s, save_path, signatures=serve)
with ops.Graph().as_default(), session.Session() as sess:
metagraph = loader.load(sess, ["serve"], save_path)
value_output = metagraph.signature_def["serving_default"].outputs["value"]
self.assertAllEqual(exported_value, sess.run(value_output.name))
def test_non_strict_predicate(self):
class NonStrictPredicateClass(autotrackable.AutoTrackable):
pass
registration.register_checkpoint_saver(
name="NonStrictPredicate",
predicate=lambda x: isinstance(x, NonStrictPredicateClass),
save_fn=lambda **kwargs: [],
restore_fn=lambda **kwargs: None,
strict_predicate_restore=False)
root = NonStrictPredicateClass()
ckpt_path = os.path.join(self.get_temp_dir(), "ckpt")
util.Checkpoint(root).write(ckpt_path)
root2 = autotrackable.AutoTrackable()
# This should run without throwing an error.
util.Checkpoint(root2).read(ckpt_path)
def test_strict_predicate(self):
class StrictPredicateClass(autotrackable.AutoTrackable):
pass
registration.register_checkpoint_saver(
name="StrictPredicate",
predicate=lambda x: isinstance(x, StrictPredicateClass),
save_fn=lambda **kwargs: [],
restore_fn=lambda **kwargs: None,
strict_predicate_restore=True)
root = StrictPredicateClass()
ckpt_path = os.path.join(self.get_temp_dir(), "ckpt")
util.Checkpoint(root).write(ckpt_path)
root2 = autotrackable.AutoTrackable()
with self.assertRaisesRegex(ValueError, "saver cannot be used"):
util.Checkpoint(root2).read(ckpt_path)
def test_registered_saver_is_called_before_save_after_load(self):
if not context.executing_eagerly():
self.skipTest("This test must run under eager mode.")
class RestoreClass(autotrackable.AutoTrackable):
pass
def save_fn(trackables, file_prefix):
del trackables # Unused.
# Check that directory is empty
files = gfile.ListDirectory(os.path.dirname(file_prefix.numpy()))
self.assertEmpty(files)
def restore_fn(trackables, merged_prefix):
del merged_prefix # Unused.
root = next(trackables.values())
self.assertEqual(root.v.numpy(), 123)
registration.register_checkpoint_saver(
name="OptionalRestore",
predicate=lambda x: isinstance(x, RestoreClass),
save_fn=save_fn,
restore_fn=restore_fn)
root = RestoreClass()
root.v = variables.Variable(123.0)
ckpt_path = os.path.join(self.get_temp_dir(), "ckpt")
util.Checkpoint(root).write(ckpt_path)
def test_migration_backwards_compatibility(self):
# Tests that objects migrated to using the advanced saver registration can
# use pre-migration checkpoints.
class NoRegisteredSaver(autotrackable.AutoTrackable):
def __init__(self, name):
self.name = name
def _serialize_to_tensors(self):
return {"name": constant_op.constant(self.name)}
class RegisteredSaver(autotrackable.AutoTrackable):
def __init__(self, name):
self.name = name
def _get_tensors(trackables, append_name=True):
tensor_names = []
shapes_and_slices = []
tensors = []
restored_trackables = []
for obj_prefix, obj in trackables.items():
tensor_names.append(obj_prefix + "name" if append_name else obj_prefix)
shapes_and_slices.append("")
tensors.append(constant_op.constant(obj.name))
restored_trackables.append(obj)
return tensor_names, shapes_and_slices, tensors, restored_trackables
def save_fn(trackables, file_prefix):
tensor_names, shapes_and_slices, tensors, _ = _get_tensors(trackables)
io_ops.save_v2(file_prefix, tensor_names, shapes_and_slices, tensors)
return file_prefix
def restore_fn(trackables, merged_prefix):
tensor_names, shapes_and_slices, tensors, restored_trackables = (
_get_tensors(trackables))
dtypes = [t.dtype for t in tensors]
try:
restored_tensors = io_ops.restore_v2(merged_prefix, tensor_names,
shapes_and_slices, dtypes)
except errors_impl.NotFoundError:
# If a NotFoundError is caught, then it means that the checkpoint
# was written prior to the saver registration migration.
tensor_names, shapes_and_slices, tensors, restored_trackables = (
_get_tensors(trackables, append_name=False))
restored_tensors = io_ops.restore_v2(merged_prefix, tensor_names,
shapes_and_slices, dtypes)
for trackable, name_tensor in zip(restored_trackables, restored_tensors):
trackable.name = name_tensor
registration.register_checkpoint_saver(
name="MigratedSaver",
predicate=lambda x: isinstance(x, RegisteredSaver),
save_fn=save_fn,
restore_fn=restore_fn,
)
before = NoRegisteredSaver("before")
after = RegisteredSaver("after")
before_ckpt_path = os.path.join(self.get_temp_dir(), "before_ckpt")
util.Checkpoint(before).write(before_ckpt_path)
after_ckpt = util.Checkpoint(after)
after_ckpt_path = os.path.join(self.get_temp_dir(), "after_ckpt")
after_ckpt.write(after_ckpt_path)
# Try loading the pre-migrated checkpoint to the migrated object.
after_ckpt.read(before_ckpt_path)
self.assertEqual(b"before", self.evaluate(after.name))
if __name__ == "__main__":
test.main()
| SingleCycleTest |
python | tornadoweb__tornado | tornado/platform/asyncio.py | {
"start": 16895,
"end": 25943
} | class ____:
"""Define ``add_reader`` methods to be called in a background select thread.
Instances of this class start a second thread to run a selector.
This thread is completely hidden from the user;
all callbacks are run on the wrapped event loop's thread.
Typically used via ``AddThreadSelectorEventLoop``,
but can be attached to a running asyncio loop.
"""
_closed = False
def __init__(self, real_loop: asyncio.AbstractEventLoop) -> None:
self._main_thread_ctx = contextvars.copy_context()
self._real_loop = real_loop
self._select_cond = threading.Condition()
self._select_args: Optional[
Tuple[List[_FileDescriptorLike], List[_FileDescriptorLike]]
] = None
self._closing_selector = False
self._thread: Optional[threading.Thread] = None
self._thread_manager_handle = self._thread_manager()
async def thread_manager_anext() -> None:
# the anext builtin wasn't added until 3.10. We just need to iterate
# this generator one step.
await self._thread_manager_handle.__anext__()
# When the loop starts, start the thread. Not too soon because we can't
# clean up if we get to this point but the event loop is closed without
# starting.
self._real_loop.call_soon(
lambda: self._real_loop.create_task(thread_manager_anext()),
context=self._main_thread_ctx,
)
self._readers: Dict[_FileDescriptorLike, Callable] = {}
self._writers: Dict[_FileDescriptorLike, Callable] = {}
# Writing to _waker_w will wake up the selector thread, which
# watches for _waker_r to be readable.
self._waker_r, self._waker_w = socket.socketpair()
self._waker_r.setblocking(False)
self._waker_w.setblocking(False)
_selector_loops.add(self)
self.add_reader(self._waker_r, self._consume_waker)
def close(self) -> None:
if self._closed:
return
with self._select_cond:
self._closing_selector = True
self._select_cond.notify()
self._wake_selector()
if self._thread is not None:
self._thread.join()
_selector_loops.discard(self)
self.remove_reader(self._waker_r)
self._waker_r.close()
self._waker_w.close()
self._closed = True
async def _thread_manager(self) -> typing.AsyncGenerator[None, None]:
# Create a thread to run the select system call. We manage this thread
# manually so we can trigger a clean shutdown from an atexit hook. Note
# that due to the order of operations at shutdown, only daemon threads
# can be shut down in this way (non-daemon threads would require the
# introduction of a new hook: https://bugs.python.org/issue41962)
self._thread = threading.Thread(
name="Tornado selector",
daemon=True,
target=self._run_select,
)
self._thread.start()
self._start_select()
try:
# The presense of this yield statement means that this coroutine
# is actually an asynchronous generator, which has a special
# shutdown protocol. We wait at this yield point until the
# event loop's shutdown_asyncgens method is called, at which point
# we will get a GeneratorExit exception and can shut down the
# selector thread.
yield
except GeneratorExit:
self.close()
raise
def _wake_selector(self) -> None:
if self._closed:
return
try:
self._waker_w.send(b"a")
except BlockingIOError:
pass
def _consume_waker(self) -> None:
try:
self._waker_r.recv(1024)
except BlockingIOError:
pass
def _start_select(self) -> None:
# Capture reader and writer sets here in the event loop
# thread to avoid any problems with concurrent
# modification while the select loop uses them.
with self._select_cond:
assert self._select_args is None
self._select_args = (list(self._readers.keys()), list(self._writers.keys()))
self._select_cond.notify()
def _run_select(self) -> None:
while True:
with self._select_cond:
while self._select_args is None and not self._closing_selector:
self._select_cond.wait()
if self._closing_selector:
return
assert self._select_args is not None
to_read, to_write = self._select_args
self._select_args = None
# We use the simpler interface of the select module instead of
# the more stateful interface in the selectors module because
# this class is only intended for use on windows, where
# select.select is the only option. The selector interface
# does not have well-documented thread-safety semantics that
# we can rely on so ensuring proper synchronization would be
# tricky.
try:
# On windows, selecting on a socket for write will not
# return the socket when there is an error (but selecting
# for reads works). Also select for errors when selecting
# for writes, and merge the results.
#
# This pattern is also used in
# https://github.com/python/cpython/blob/v3.8.0/Lib/selectors.py#L312-L317
rs, ws, xs = select.select(to_read, to_write, to_write)
ws = ws + xs
except OSError as e:
# After remove_reader or remove_writer is called, the file
# descriptor may subsequently be closed on the event loop
# thread. It's possible that this select thread hasn't
# gotten into the select system call by the time that
# happens in which case (at least on macOS), select may
# raise a "bad file descriptor" error. If we get that
# error, check and see if we're also being woken up by
# polling the waker alone. If we are, just return to the
# event loop and we'll get the updated set of file
# descriptors on the next iteration. Otherwise, raise the
# original error.
if e.errno == getattr(errno, "WSAENOTSOCK", errno.EBADF):
rs, _, _ = select.select([self._waker_r.fileno()], [], [], 0)
if rs:
ws = []
else:
raise
else:
raise
try:
self._real_loop.call_soon_threadsafe(
self._handle_select, rs, ws, context=self._main_thread_ctx
)
except RuntimeError:
# "Event loop is closed". Swallow the exception for
# consistency with PollIOLoop (and logical consistency
# with the fact that we can't guarantee that an
# add_callback that completes without error will
# eventually execute).
pass
except AttributeError:
# ProactorEventLoop may raise this instead of RuntimeError
# if call_soon_threadsafe races with a call to close().
# Swallow it too for consistency.
pass
def _handle_select(
self, rs: List[_FileDescriptorLike], ws: List[_FileDescriptorLike]
) -> None:
for r in rs:
self._handle_event(r, self._readers)
for w in ws:
self._handle_event(w, self._writers)
self._start_select()
def _handle_event(
self,
fd: _FileDescriptorLike,
cb_map: Dict[_FileDescriptorLike, Callable],
) -> None:
try:
callback = cb_map[fd]
except KeyError:
return
callback()
def add_reader(
self, fd: _FileDescriptorLike, callback: Callable[..., None], *args: Any
) -> None:
self._readers[fd] = functools.partial(callback, *args)
self._wake_selector()
def add_writer(
self, fd: _FileDescriptorLike, callback: Callable[..., None], *args: Any
) -> None:
self._writers[fd] = functools.partial(callback, *args)
self._wake_selector()
def remove_reader(self, fd: _FileDescriptorLike) -> bool:
try:
del self._readers[fd]
except KeyError:
return False
self._wake_selector()
return True
def remove_writer(self, fd: _FileDescriptorLike) -> bool:
try:
del self._writers[fd]
except KeyError:
return False
self._wake_selector()
return True
| SelectorThread |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 52270,
"end": 52572
} | class ____(TestOnlySetsInBinaryOps, TestCase):
def setUp(self):
super().setUp()
self.OrderedSet = OrderedSet((1, 2, 3))
self.other = "abc"
self.otherIsIterable = True
# ------------------------------------------------------------------------------
| TestOnlySetsString |
python | dask__distributed | distributed/dashboard/components/__init__.py | {
"start": 116,
"end": 1550
} | class ____:
"""Base class for Dask.distributed UI dashboard components.
This class must have two attributes, ``root`` and ``source``, and one
method ``update``:
* source: a Bokeh ColumnDataSource
* root: a Bokeh Model
* update: a method that consumes the messages dictionary found in
distributed.bokeh.messages
"""
def __init__(self):
self.source = None
self.root = None
def update(self, messages):
"""Reads from bokeh.distributed.messages and updates self.source"""
def add_periodic_callback(doc, component, interval):
"""Add periodic callback to doc in a way that avoids reference cycles
If we instead use ``doc.add_periodic_callback(component.update, 100)`` then
the component stays in memory as a reference cycle because its method is
still around. This way we avoid that and let things clean up a bit more
nicely.
TODO: we still have reference cycles. Docs seem to be referred to by their
add_periodic_callback methods.
"""
ref = weakref.ref(component)
doc.add_periodic_callback(lambda: update(ref), interval)
_attach(doc, component)
@without_property_validation
def update(ref):
comp = ref()
if comp is not None:
comp.update()
def _attach(doc, component):
if not hasattr(doc, "components"):
doc.components = set()
doc.components.add(component)
| DashboardComponent |
python | mitmproxy__pdoc | test/testdata/flavors_numpy.py | {
"start": 6494,
"end": 13747
} | class ____(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attributes
----------
attr1 : str
Description of `attr1`.
attr2 : :obj:`int`, optional
Description of `attr2`.
"""
def __init__(self, param1, param2, param3):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note
----
Do not include the `self` parameter in the ``Parameters`` section.
Parameters
----------
param1 : str
Description of `param1`.
param2 : :obj:`list` of :obj:`str`
Description of `param2`. Multiple
lines are supported.
param3 : :obj:`int`, optional
Description of `param3`.
"""
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: list of str: Doc comment *before* attribute, with type specified
self.attr4 = ["attr4"]
self.attr5 = None
"""str: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return "readonly_property"
@property
def readwrite_property(self):
""":obj:`list` of :obj:`str`: Properties with both a getter and setter
should only be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ["readwrite_property"]
@readwrite_property.setter
def readwrite_property(self, value):
value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note
----
Do not include the `self` parameter in the ``Parameters`` section.
Parameters
----------
param1
The first parameter.
param2
The second parameter.
Returns
-------
bool
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are not included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output, if
``napoleon_include_special_with_doc`` is set to True.
This behavior can be enabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = True
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
def foo(var1, var2, *args, long_var_name='hi', **kwargs):
r"""Summarize the function in one line.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
*args : iterable
Other arguments.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
**kwargs : dict
Keyword arguments.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
type_without_description
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation.
common_parameters_listed_above : type
Explanation.
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
numpy.array : Relationship (optional).
numpy.ndarray : Relationship (optional), which could be fairly long, in
which case the line wraps here.
numpy.dot, numpy.linalg.norm, numpy.eye
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a Greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a = [1, 2, 3]
>>> print([x + 3 for x in a])
[4, 5, 6]
>>> print("a\nb")
a
b
"""
# After closing class docstring, there should be one blank line to
# separate following codes (according to PEP257).
# But for function, method and module, there should be no blank lines
# after closing the docstring.
pass
def invalid_format(test):
"""
In this example, there is no description for the test argument
Parameters
----------
param1
"""
def invalid_format2() -> None:
"""
Another example without description, but this time indented.
Returns
-------
Text describing the return value.
"""
def invalid_format3() -> None:
"""
Another example with a multiline text.
Returns
-------
Multiline text
describing the return value.
"""
| ExampleClass |
python | getsentry__sentry | src/sentry/monitors/endpoints/base_monitor_stats.py | {
"start": 850,
"end": 3744
} | class ____(BaseEndpointMixin, StatsMixin):
def get_monitor_stats(self, request: Request, project, monitor) -> Response:
args = self._parse_args(request)
start = normalize_to_epoch(args["start"], args["rollup"])
end = normalize_to_epoch(args["end"], args["rollup"])
tracked_statuses = [
CheckInStatus.OK,
CheckInStatus.ERROR,
CheckInStatus.MISSED,
CheckInStatus.TIMEOUT,
CheckInStatus.UNKNOWN,
]
check_ins = MonitorCheckIn.objects.filter(
monitor=monitor,
status__in=tracked_statuses,
date_added__gt=args["start"],
date_added__lte=args["end"],
)
environments = get_environments(request, project.organization)
if environments:
check_ins = check_ins.filter(
monitor_environment__environment_id__in=[e.id for e in environments]
)
# Use postgres' `date_bin` to bucket rounded to our rollups
bucket = Func(
timedelta(seconds=args["rollup"]),
"date_added",
datetime.fromtimestamp(end, UTC),
function="date_bin",
output_field=DateTimeField(),
)
# Save space on date allocation and return buckets as unix timestamps
bucket = Extract(bucket, "epoch")
# retrieve the list of checkins in the time range and count each by
# status. Bucketing is done at the postgres level for performance
check_in_history = (
check_ins.all()
.annotate(bucket=bucket)
.values("status", "bucket")
.order_by("bucket")
.annotate(count=Count("*"))
.values_list("bucket", "status", "count")
)
# Duration count must be done as a second query
duration_history = (
check_ins.all()
.annotate(bucket=bucket)
.values("bucket")
.order_by("bucket")
.annotate(duration_avg=Avg("duration"))
.values_list("bucket", "duration_avg")
)
stats = OrderedDict()
status_to_name = dict(CheckInStatus.as_choices())
# initialize success/failure/missed/duration stats
while start <= end:
stats[start] = {status_to_name[status]: 0 for status in tracked_statuses}
stats[start]["duration"] = 0
start += args["rollup"]
for ts, status, count in check_in_history.iterator():
named_status = status_to_name[status]
stats[ts][named_status] = count
for ts, duration_avg in duration_history.iterator():
stats[ts]["duration"] = duration_avg
# Ordered dict keeps timestamp order
stats_list = [{"ts": ts, **data} for ts, data in stats.items()]
return Response(stats_list)
| MonitorStatsMixin |
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 88096,
"end": 89733
} | class ____:
@pytest.mark.filterwarnings("ignore:invalid value encountered") # Dask
def test_empty_input(self, xp):
x = xp.asarray([])
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
res = stats.kstatvar(x)
xp_assert_equal(res, xp.asarray(xp.nan))
def test_nan_input(self, xp):
data = xp.arange(10.)
data = xp.where(data == 6, xp.nan, data)
xp_assert_equal(stats.kstat(data), xp.asarray(xp.nan))
@skip_xp_backends(np_only=True,
reason='input validation of `n` does not depend on backend')
def test_bad_arg(self, xp):
# Raise ValueError is n is not 1 or 2.
data = [1]
n = 10
message = 'Only n=1 or n=2 supported.'
with pytest.raises(ValueError, match=message):
stats.kstatvar(data, n=n)
def test_against_R_mathworld(self, xp):
# Test against reference values computed using formulas exactly as
# they appear at https://mathworld.wolfram.com/k-Statistic.html
# This is *really* similar to how they appear in the implementation,
# but that could change, and this should not.
n = len(x_kstat)
k2 = 12.65006954022974 # see source code in TestKstat
k4 = -141.6682291883626
res = stats.kstatvar(xp.asarray(x_kstat), 1)
ref = k2 / n
xp_assert_close(res, xp.asarray(ref))
res = stats.kstatvar(xp.asarray(x_kstat), 2)
# *unbiased estimator* for var(k2)
ref = (2*k2**2*n + (n-1)*k4) / (n * (n+1))
xp_assert_close(res, xp.asarray(ref))
| TestKstatVar |
python | huggingface__transformers | src/transformers/models/bridgetower/image_processing_bridgetower.py | {
"start": 4539,
"end": 26048
} | class ____(BaseImageProcessor):
r"""
Constructs a BridgeTower image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{'shortest_edge': 288}`):
Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
`int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
`do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
size_divisor (`int`, *optional*, defaults to 32):
The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. Can be overridden by the `do_center_crop` parameter in the `preprocess`
method.
crop_size (`dict[str, int]`, *optional*):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method. If unset defaults to `size`,
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
the `do_pad` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = BridgeTowerImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
size_divisor: int = 32,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_pad: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 288}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.size_divisor = size_divisor
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad)
self.do_center_crop = do_center_crop
self.crop_size = crop_size
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
size_divisor: int = 32,
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image.
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
resized to the max size while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
size_divisor (`int`, *optional*, defaults to 32):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
shorter = size["shortest_edge"]
longer = int(1333 / 800 * shorter)
output_size = get_resize_output_image_size(
image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def center_crop(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`dict[str, int]`):
Size of the output image in the form `{"height": h, "width": w}`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
output_size = size["shortest_edge"]
return center_crop(
image,
size=(output_size, output_size),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
output_size: tuple[int, int],
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=PaddingMode.CONSTANT,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad
def pad(
self,
images: list[np.ndarray],
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [
self._pad_image(
image,
pad_size,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
for image in images
]
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [
make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
for image in images
]
data["pixel_mask"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
created and returned.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
padded with zeros and then cropped
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
# For backwards compatibility. Initial version of this processor was cropping to the "size" argument, which
# it should default to if crop_size is undefined.
crop_size = (
crop_size if crop_size is not None else (self.crop_size if self.crop_size is not None else self.size)
)
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
# Here, crop_size is used only if it is set, else size will be used.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if do_resize:
images = [
self.resize(
image=image,
size=size,
size_divisor=size_divisor,
resample=resample,
input_data_format=input_data_format,
)
for image in images
]
if do_center_crop:
images = [
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
if do_pad:
encoded_outputs = self.pad(
images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format
)
else:
encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
return encoded_outputs
__all__ = ["BridgeTowerImageProcessor"]
| BridgeTowerImageProcessor |
python | huggingface__transformers | tests/models/timesformer/test_modeling_timesformer.py | {
"start": 5402,
"end": 12451
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = TimesformerModelTester(self)
self.config_tester = ConfigTester(
self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_video_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "facebook/timesformer-base-finetuned-k400"
model = TimesformerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model has no attentions")
else:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
seq_len = self.model_tester.seq_length
num_frames = self.model_tester.num_frames
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
)
video = np.load(file)
return list(video)
@require_torch
@require_vision
| TimesformerModelTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 43288,
"end": 44573
} | class ____(ORMBaseModel):
key: str = Field(description="An optional unique reference key for this artifact.")
latest_id: UUID = Field(
description="The latest artifact ID associated with the key."
)
type: Optional[str] = Field(
default=None,
description=(
"An identifier that describes the shape of the data field. e.g. 'result',"
" 'table', 'markdown'"
),
)
description: Optional[str] = Field(
default=None, description="A markdown-enabled description of the artifact."
)
data: Optional[Union[Dict[str, Any], Any]] = Field(
default=None,
description=(
"Data associated with the artifact, e.g. a result.; structure depends on"
" the artifact type."
),
)
metadata_: Optional[Dict[str, str]] = Field(
default=None,
description=(
"User-defined artifact metadata. Content must be string key and value"
" pairs."
),
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run associated with the artifact."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run associated with the artifact."
)
| ArtifactCollection |
python | ansible__ansible | test/units/plugins/test/test_all.py | {
"start": 634,
"end": 7140
} | class ____:
_tempdir: tempfile.TemporaryDirectory[str] | None = None
def __call__(self, *args, **kwargs) -> str:
self._tempdir = tempfile.TemporaryDirectory()
symlink = pathlib.Path(self._tempdir.name) / 'a_symlink'
symlink.symlink_to('something')
return str(symlink)
def __del__(self) -> None:
if self._tempdir:
self._tempdir.cleanup()
def __repr__(self) -> str:
return 'MakeLink'
TEST_DATA_SET: tuple[tuple[t.Any, str, bool, Extra | None], ...] = (
# core
(dict(failed=1), 'failed', True, None),
(dict(failed=0), 'failed', False, None),
(dict(), 'failed', False, None),
(dict(failed=1), 'success', False, None),
(dict(failed=0), 'success', True, None),
(dict(), 'success', True, None),
(dict(unreachable=1), 'reachable', False, None),
(dict(unreachable=0), 'reachable', True, None),
(dict(), 'reachable', True, None),
(dict(unreachable=0), 'unreachable', False, None),
(dict(unreachable=1), 'unreachable', True, None),
(dict(), 'unreachable', False, None),
(dict(timedout=dict(period=99)), 'timedout', True, None),
# (dict(timedout=1), 'timedout', False, None), # oops, bug
(dict(timedout=0), 'timedout', False, None),
(dict(), 'timedout', False, None),
(dict(changed=1), 'changed', True, None),
(dict(changed=0), 'changed', False, None),
(dict(), 'changed', False, None),
# (dict(results=[]), 'changed', True, None), # oops, bug
(dict(results=[dict(changed=1)]), 'changed', True, None),
(dict(results=[dict(changed=0)]), 'changed', False, None),
(dict(), 'changed', False, None),
(dict(skipped=1), 'skipped', True, None),
(dict(skipped=0), 'skipped', False, None),
(dict(), 'skipped', False, None),
(dict(finished=1), 'finished', True, None),
(dict(finished=0), 'finished', False, None),
(dict(), 'finished', True, None),
(dict(started=1), 'started', True, None),
(dict(started=0), 'started', False, None),
(dict(), 'started', True, None),
('"foo"', 'match', True, Extra(args=['"foo"'])),
('"foo"', 'match', False, Extra(args=['"bar"'])),
('"xxfooxx"', 'search', True, Extra(args=['"foo"'])),
('"xxfooxx"', 'search', False, Extra(args=['"bar"'])),
('"fooxx"', 'regex', True, Extra(args=['"FOO"'], kwargs=dict(ignorecase=True, multiline=True, match_type='"match"'))),
('"fooxx"', 'regex', False, Extra(args=['"BAR"'], kwargs=dict(ignorecase=True, multiline=True, match_type='"match"'))),
('1.1', 'version_compare', True, Extra(args=['1.1', '"eq"'])),
('1.1', 'version_compare', False, Extra(args=['1.0', '"eq"'])),
([0], 'any', False, None),
([1], 'any', True, None),
([0], 'all', False, None),
([1], 'all', True, None),
(1, 'truthy', True, None),
(0, 'truthy', False, None),
(1, 'falsy', False, None),
(0, 'falsy', True, None),
('foo', 'vault_encrypted', True, Extra(variables=dict(foo=EncryptedString(ciphertext='$ANSIBLE_VAULT;1.1;BLAH')))),
('foo', 'vault_encrypted', False, Extra(variables=dict(foo='not_encrypted'))),
(repr(str(pathlib.Path(__file__).parent / "dummy_vault.txt")), 'vaulted_file', True, None),
(repr(__file__), 'vaulted_file', False, None),
('q', 'defined', True, None),
('not_defined', 'defined', False, None),
('q', 'undefined', False, None),
('not_defined', 'undefined', True, None),
# files
('"/"', 'directory', True, None),
(repr(__file__), 'directory', False, None),
(repr(__file__), 'file', True, None),
('"/"', 'file', False, None),
('make_link()', 'link', True, Extra(variables=dict(make_link=MakeLink()))),
('"/"', 'link', False, None),
('"/"', 'exists', True, None),
('"/does_not_exist"', 'exists', False, None),
('"/"', 'link_exists', True, None),
('"/does_not_exist"', 'link_exists', False, None),
('"/absolute"', 'abs', True, None),
('"relative"', 'abs', False, None),
('"/"', 'same_file', True, Extra(args=['"/"'])),
(repr(__file__), 'same_file', False, Extra(args=['"/"'])),
('"/"', 'mount', True, None),
('"/not_a_mount_point"', 'mount', False, None),
# mathstuff
([1], 'subset', True, Extra(args=[[1]])),
([0], 'subset', False, Extra(args=[[1]])),
([1], 'superset', True, Extra(args=[[1]])),
([0], 'superset', False, Extra(args=[[1]])),
([0], 'contains', True, Extra(args=[0])),
([1], 'contains', False, Extra(args=[0])),
('nan', 'nan', True, Extra(variables=dict(nan=math.nan))),
('"a string"', 'nan', False, None),
# uri
('"https://ansible.com/"', 'uri', True, None),
(1, 'uri', False, None),
('"https://ansible.com/"', 'url', True, None),
(1, 'url', False, None),
('"urn:https://ansible.com/"', 'urn', True, None),
(1, 'urn', False, None),
)
@pytest.mark.parametrize("value,test,expected,extra", TEST_DATA_SET, ids=str)
def test_truthy_inputs(value: object, test: str, expected: bool, extra: Extra | None) -> None:
"""Ensure test plugins return the expected bool result, not just a truthy/falsey value."""
test_invocation = test
if extra:
test_args = extra.args or []
test_args.extend(f'{k}={v}' for k, v in (extra.kwargs or {}).items())
test_invocation += '(' + ', '.join(str(arg) for arg in test_args) + ')'
expression = f'{value} is {test_invocation}'
with emits_warnings(deprecation_pattern=[]):
result = Templar(variables=extra.variables if extra else None).evaluate_expression(trust_as_template(expression))
assert result is expected
def test_ensure_all_plugins_tested() -> None:
"""Ensure all plugins have at least one entry in the test data set, accounting for functions which have multiple names."""
test_plugins: list[AnsibleJinja2Test] = [plugin for plugin in test_loader.all() if plugin.ansible_name.startswith('ansible.builtin.')]
plugin_aliases: dict[t.Any, set[str]] = collections.defaultdict(set)
for test_plugin in test_plugins:
plugin_aliases[test_plugin.j2_function].add(test_plugin.ansible_name)
missing_entries: list[str] = []
for plugin_names in plugin_aliases.values():
matching_tests = {_expected for _value, test, _expected, _extra in TEST_DATA_SET if f'ansible.builtin.{test}' in plugin_names}
missing = {True, False} - matching_tests
if missing: # pragma: nocover
missing_entries.append(f'{plugin_names}: {missing}')
assert not missing_entries
| MakeLink |
python | scikit-learn__scikit-learn | examples/calibration/plot_calibration_multiclass.py | {
"start": 617,
"end": 10759
} | class ____ an instance (red: class 1, green: class 2, blue: class 3).
"""
# %%
# Data
# ----
# Below, we generate a classification dataset with 2000 samples, 2 features
# and 3 target classes. We then split the data as follows:
#
# * train: 600 samples (for training the classifier)
# * valid: 400 samples (for calibrating predicted probabilities)
# * test: 1000 samples
#
# Note that we also create `X_train_valid` and `y_train_valid`, which consists
# of both the train and valid subsets. This is used when we only want to train
# the classifier but not calibrate the predicted probabilities.
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.datasets import make_blobs
np.random.seed(0)
X, y = make_blobs(
n_samples=2000, n_features=2, centers=3, random_state=42, cluster_std=5.0
)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:1000], y[600:1000]
X_train_valid, y_train_valid = X[:1000], y[:1000]
X_test, y_test = X[1000:], y[1000:]
# %%
# Fitting and calibration
# -----------------------
#
# First, we will train a :class:`~sklearn.ensemble.RandomForestClassifier`
# with 25 base estimators (trees) on the concatenated train and validation
# data (1000 samples). This is the uncalibrated classifier.
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
# %%
# To train the calibrated classifier, we start with the same
# :class:`~sklearn.ensemble.RandomForestClassifier` but train it using only
# the train data subset (600 samples) then calibrate, with `method='sigmoid'`,
# using the valid data subset (400 samples) in a 2-stage process.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.frozen import FrozenEstimator
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(FrozenEstimator(clf), method="sigmoid")
cal_clf.fit(X_valid, y_valid)
# %%
# Compare probabilities
# ---------------------
# Below we plot a 2-simplex with arrows showing the change in predicted
# probabilities of the test samples.
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
colors = ["r", "g", "b"]
clf_probs = clf.predict_proba(X_test)
cal_clf_probs = cal_clf.predict_proba(X_test)
# Plot arrows
for i in range(clf_probs.shape[0]):
plt.arrow(
clf_probs[i, 0],
clf_probs[i, 1],
cal_clf_probs[i, 0] - clf_probs[i, 0],
cal_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]],
head_width=1e-2,
)
# Plot perfect predictions, at each vertex
plt.plot([1.0], [0.0], "ro", ms=20, label="Class 1")
plt.plot([0.0], [1.0], "go", ms=20, label="Class 2")
plt.plot([0.0], [0.0], "bo", ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex")
# Annotate points 6 points around the simplex, and mid point inside simplex
plt.annotate(
r"($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)",
xy=(1.0 / 3, 1.0 / 3),
xytext=(1.0 / 3, 0.23),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.plot([1.0 / 3], [1.0 / 3], "ko", ms=5)
plt.annotate(
r"($\frac{1}{2}$, $0$, $\frac{1}{2}$)",
xy=(0.5, 0.0),
xytext=(0.5, 0.1),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.annotate(
r"($0$, $\frac{1}{2}$, $\frac{1}{2}$)",
xy=(0.0, 0.5),
xytext=(0.1, 0.5),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.annotate(
r"($\frac{1}{2}$, $\frac{1}{2}$, $0$)",
xy=(0.5, 0.5),
xytext=(0.6, 0.6),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.annotate(
r"($0$, $0$, $1$)",
xy=(0, 0),
xytext=(0.1, 0.1),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.annotate(
r"($1$, $0$, $0$)",
xy=(1, 0),
xytext=(1, 0.1),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
plt.annotate(
r"($0$, $1$, $0$)",
xy=(0, 1),
xytext=(0.1, 1),
xycoords="data",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
# Add grid
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], "k", alpha=0.2)
plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2)
plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2)
plt.title("Change of predicted probabilities on test samples after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
_ = plt.legend(loc="best")
# %%
# In the figure above, each vertex of the simplex represents
# a perfectly predicted class (e.g., 1, 0, 0). The mid point
# inside the simplex represents predicting the three classes with equal
# probability (i.e., 1/3, 1/3, 1/3). Each arrow starts at the
# uncalibrated probabilities and end with the arrow head at the calibrated
# probability. The color of the arrow represents the true class of that test
# sample.
#
# The uncalibrated classifier is overly confident in its predictions and
# incurs a large :ref:`log loss <log_loss>`. The calibrated classifier incurs
# a lower :ref:`log loss <log_loss>` due to two factors. First, notice in the
# figure above that the arrows generally point away from the edges of the
# simplex, where the probability of one class is 0. Second, a large proportion
# of the arrows point towards the true class, e.g., green arrows (samples where
# the true class is 'green') generally point towards the green vertex. This
# results in fewer over-confident, 0 predicted probabilities and at the same
# time an increase in the predicted probabilities of the correct class.
# Thus, the calibrated classifier produces more accurate predicted probabilities
# that incur a lower :ref:`log loss <log_loss>`
#
# We can show this objectively by comparing the :ref:`log loss <log_loss>` of
# the uncalibrated and calibrated classifiers on the predictions of the 1000
# test samples. Note that an alternative would have been to increase the number
# of base estimators (trees) of the
# :class:`~sklearn.ensemble.RandomForestClassifier` which would have resulted
# in a similar decrease in :ref:`log loss <log_loss>`.
from sklearn.metrics import log_loss
loss = log_loss(y_test, clf_probs)
cal_loss = log_loss(y_test, cal_clf_probs)
print("Log-loss of:")
print(f" - uncalibrated classifier: {loss:.3f}")
print(f" - calibrated classifier: {cal_loss:.3f}")
# %%
# We can also assess calibration with the Brier score for probabilistics predictions
# (lower is better, possible range is [0, 2]):
from sklearn.metrics import brier_score_loss
loss = brier_score_loss(y_test, clf_probs)
cal_loss = brier_score_loss(y_test, cal_clf_probs)
print("Brier score of")
print(f" - uncalibrated classifier: {loss:.3f}")
print(f" - calibrated classifier: {cal_loss:.3f}")
# %%
# According to the Brier score, the calibrated classifier is not better than
# the original model.
#
# Finally we generate a grid of possible uncalibrated probabilities over
# the 2-simplex, compute the corresponding calibrated probabilities and
# plot arrows for each. The arrows are colored according the highest
# uncalibrated probability. This illustrates the learned calibration map:
plt.figure(figsize=(10, 10))
# Generate grid of probability values
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
# Use the three class-wise calibrators to compute calibrated probabilities
calibrated_classifier = cal_clf.calibrated_classifiers_[0]
prediction = np.vstack(
[
calibrator.predict(this_p)
for calibrator, this_p in zip(calibrated_classifier.calibrators, p.T)
]
).T
# Re-normalize the calibrated predictions to make sure they stay inside the
# simplex. This same renormalization step is performed internally by the
# predict method of CalibratedClassifierCV on multiclass problems.
prediction /= prediction.sum(axis=1)[:, None]
# Plot changes in predicted probabilities induced by the calibrators
for i in range(prediction.shape[0]):
plt.arrow(
p[i, 0],
p[i, 1],
prediction[i, 0] - p[i, 0],
prediction[i, 1] - p[i, 1],
head_width=1e-2,
color=colors[np.argmax(p[i])],
)
# Plot the boundaries of the unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex")
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], "k", alpha=0.2)
plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2)
plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2)
plt.title("Learned sigmoid calibration map")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
# %%
# One can observe that, on average, the calibrator is pushing highly confident
# predictions away from the boundaries of the simplex while simultaneously
# moving uncertain predictions towards one of three modes, one for each class.
# We can also observe that the mapping is not symmetric. Furthermore some
# arrows seem to cross class assignment boundaries which is not necessarily
# what one would expect from a calibration map as it means that some predicted
# classes will change after calibration.
#
# All in all, the One-vs-Rest multiclass-calibration strategy implemented in
# `CalibratedClassifierCV` should not be trusted blindly.
| of |
python | getsentry__sentry | tests/sentry/integrations/slack/test_message_builder.py | {
"start": 49876,
"end": 53947
} | class ____(TestCase, PerformanceIssueTestCase, OccurrenceTestMixin):
@freeze_time("2024-02-23")
def setUp(self) -> None:
self.endpoint_regression_issue = self.create_group(
type=PerformanceP95EndpointRegressionGroupType.type_id
)
self.cron_issue = self.create_group(type=MonitorIncidentType.type_id)
self.feedback_issue = self.create_group(
type=FeedbackGroup.type_id, substatus=GroupSubStatus.NEW
)
@freeze_time("2024-02-23")
@patch("sentry.models.Group.get_recommended_event_for_environments")
def test_get_context(self, mock_event: MagicMock) -> None:
event = self.store_event(data={"message": "Hello world"}, project_id=self.project.id)
group_event = event.for_group(event.groups[0])
occurrence = self.build_occurrence(level="info", evidence_data={"breakpoint": 1709161200})
occurrence.save()
group_event.occurrence = occurrence
mock_event.return_value = group_event
# endpoint regression should use Approx Start Time
context = get_context(self.endpoint_regression_issue)
breakpoint_time = datetime(2024, 2, 28, 23, 0)
assert f"Approx. Start Time: *{breakpoint_time.strftime('%Y-%m-%d %H:%M:%S')}*" in context
# crons don't have context
assert get_context(self.cron_issue) == ""
# feedback doesn't have context
assert get_context(self.feedback_issue) == ""
def test_get_context_error_user_count(self) -> None:
event = self.store_event(
data={},
project_id=self.project.id,
assert_no_errors=False,
)
group = event.group
assert group
context_without_error_user_count = get_context(group)
assert (
context_without_error_user_count
== f"State: *New* First Seen: *{time_since(group.first_seen)}*"
)
group.times_seen = 3
group.substatus = GroupSubStatus.ONGOING
group.save()
context_with_error_user_count = get_context(group)
assert (
context_with_error_user_count
== f"Events: *3* State: *Ongoing* First Seen: *{time_since(group.first_seen)}*"
)
def test_get_context_users_affected(self) -> None:
env = self.create_environment(project=self.project)
env2 = self.create_environment(project=self.project)
rule = IssueAlertRule.objects.create(project=self.project, label="my rule")
event = [
self.store_event(
data={
"user": {"id": i},
"environment": env.name,
},
project_id=self.project.id,
assert_no_errors=False,
)
for i in range(5)
][0]
[
self.store_event(
data={
"user": {"id": i},
"environment": env2.name,
},
project_id=self.project.id,
assert_no_errors=False,
)
for i in range(5, 7)
]
group = event.group
assert group
group.update(type=1, substatus=GroupSubStatus.ONGOING, times_seen=3)
context = get_context(group, [rule])
assert (
context
== f"Events: *3* Users Affected: *7* State: *Ongoing* First Seen: *{time_since(group.first_seen)}*"
)
# filter users affected by env
rule.update(environment_id=env.id)
context = get_context(group, [rule])
assert (
context
== f"Events: *3* Users Affected: *5* State: *Ongoing* First Seen: *{time_since(group.first_seen)}*"
)
def test_get_tags(self) -> None:
# don't use default tags. if we don't pass in tags to get_tags, we don't return any
tags = get_tags(
self.endpoint_regression_issue, self.endpoint_regression_issue.get_latest_event()
)
assert not tags
| SlackNotificationConfigTest |
python | keras-team__keras | keras/src/layers/preprocessing/category_encoding_test.py | {
"start": 349,
"end": 12725
} | class ____(testing.TestCase):
@parameterized.named_parameters(TEST_CASES)
def test_count_output(self, sparse):
input_array = np.array([1, 2, 3, 1])
expected_output = np.array([0, 2, 1, 1, 0, 0])
num_tokens = 6
expected_output_shape = (num_tokens,)
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="count", sparse=sparse
)
int_data = layer(input_array)
self.assertEqual(expected_output_shape, int_data.shape)
self.assertAllClose(int_data, expected_output)
self.assertSparse(int_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_array.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
@parameterized.named_parameters(TEST_CASES)
def test_count_weighted_output(self, sparse):
input_array = np.array([[0, 1], [0, 0], [1, 2], [3, 1]])
count_weights = np.array(
[[0.1, 0.2], [0.1, 0.1], [0.2, 0.3], [0.4, 0.2]]
)
expected_output = np.array(
[
[0.1, 0.2, 0.0, 0.0, 0.0, 0.0],
[0.2, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.2, 0.3, 0.0, 0.0, 0.0],
[0.0, 0.2, 0.0, 0.4, 0.0, 0.0],
]
)
num_tokens = 6
expected_output_shape = (input_array.shape[0], num_tokens)
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="count", sparse=sparse
)
int_data = layer(input_array, count_weights=count_weights)
self.assertEqual(expected_output_shape, int_data.shape)
self.assertAllClose(int_data, expected_output)
self.assertSparse(int_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_array.shape, dtype="int32"),
count_weights=layers.Input(
batch_shape=input_array.shape, dtype="float32"
),
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
@parameterized.named_parameters(TEST_CASES)
def test_batched_count_output(self, sparse):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
expected_output = np.array([[0, 2, 1, 1, 0, 0], [2, 1, 0, 1, 0, 0]])
num_tokens = 6
expected_output_shape = (2, num_tokens)
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="count", sparse=sparse
)
int_data = layer(input_array)
self.assertEqual(expected_output_shape, int_data.shape)
self.assertAllClose(int_data, expected_output)
self.assertSparse(int_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_array.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
@parameterized.named_parameters(TEST_CASES)
def test_multi_hot(self, sparse):
input_data = np.array([3, 2, 0, 1])
expected_output = np.array([1, 1, 1, 1, 0, 0])
num_tokens = 6
expected_output_shape = (num_tokens,)
# Test call on layer directly.
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
)
output_data = layer(input_data)
self.assertAllClose(expected_output, output_data)
self.assertEqual(expected_output_shape, output_data.shape)
self.assertSparse(output_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_data.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
@parameterized.named_parameters(TEST_CASES)
def test_batched_multi_hot(self, sparse):
input_data = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
expected_output = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 0]])
num_tokens = 6
expected_output_shape = (input_data.shape[0], num_tokens)
# Test call on layer directly.
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
)
output_data = layer(input_data)
self.assertAllClose(expected_output, output_data)
self.assertEqual(expected_output_shape, output_data.shape)
self.assertSparse(output_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_data.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
# Test compute_output_shape
input_data = np.array((4))
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="multi_hot", sparse=sparse
)
self.assertEqual(
layer(input_data).shape,
layer.compute_output_shape(input_data.shape),
)
@parameterized.named_parameters(TEST_CASES)
def test_one_hot(self, sparse):
input_data = np.array([3, 2, 0, 1])
expected_output = np.array(
[
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
]
)
num_tokens = 6
expected_output_shape = (input_data.shape[0], num_tokens)
# Test call on layer directly.
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
)
output_data = layer(input_data)
self.assertAllClose(expected_output, output_data)
self.assertEqual(expected_output_shape, output_data.shape)
self.assertSparse(output_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_data.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
# Test compute_output_shape
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
)
self.assertEqual(
layer(input_data).shape,
layer.compute_output_shape(input_data.shape),
)
# Test compute_output_shape with 1 extra dimension
input_data = np.array([[3], [2], [0], [1]])
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
)
self.assertEqual(
layer(input_data).shape,
layer.compute_output_shape(input_data.shape),
)
input_data = np.array((4,))
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
)
self.assertEqual(
layer(input_data).shape,
layer.compute_output_shape(input_data.shape),
)
@parameterized.named_parameters(TEST_CASES)
def test_batched_one_hot(self, sparse):
input_data = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
expected_output = np.array(
[
[
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
],
[
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
],
]
)
num_tokens = 6
expected_output_shape = input_data.shape[0:2] + (num_tokens,)
# Test call on layer directly.
layer = layers.CategoryEncoding(
num_tokens=num_tokens, output_mode="one_hot", sparse=sparse
)
output_data = layer(input_data)
self.assertAllClose(expected_output, output_data)
self.assertEqual(expected_output_shape, output_data.shape)
self.assertSparse(output_data, sparse)
# Test symbolic call.
output = layer(
layers.Input(batch_shape=input_data.shape, dtype="int32")
)
self.assertEqual(expected_output_shape, output.shape)
self.assertEqual("float32", output.dtype)
self.assertSparse(output, sparse)
def test_tf_data_compatibility(self):
layer = layers.CategoryEncoding(
num_tokens=4, output_mode="one_hot", dtype="int32"
)
input_data = np.array([3, 2, 0, 1])
expected_output = np.array(
[
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
]
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, expected_output)
def test_category_encoding_without_num_tokens(self):
with self.assertRaisesRegex(
ValueError, r"num_tokens must be set to use this layer"
):
layers.CategoryEncoding(output_mode="multi_hot")
def test_category_encoding_with_invalid_num_tokens(self):
with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
layers.CategoryEncoding(num_tokens=0, output_mode="multi_hot")
with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
layers.CategoryEncoding(num_tokens=-1, output_mode="multi_hot")
def test_category_encoding_with_unnecessary_count_weights(self):
layer = layers.CategoryEncoding(num_tokens=4, output_mode="multi_hot")
input_data = np.array([0, 1, 2, 3])
count_weights = np.array([0.1, 0.2, 0.3, 0.4])
with self.assertRaisesRegex(
ValueError, r"`count_weights` is not used when `output_mode`"
):
layer(input_data, count_weights=count_weights)
def test_invalid_output_mode_raises_error(self):
with self.assertRaisesRegex(
ValueError, r"Unknown arg for output_mode: invalid_mode"
):
layers.CategoryEncoding(num_tokens=4, output_mode="invalid_mode")
def test_encode_one_hot_single_sample(self):
layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
input_array = np.array([1, 2, 3, 1])
expected_output = np.array(
[
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
]
)
output = layer._encode(input_array)
self.assertAllClose(expected_output, output)
def test_encode_one_hot_batched_samples(self):
layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
input_array = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
expected_output = np.array(
[
[[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
[[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
]
)
output = layer._encode(input_array)
self.assertAllClose(expected_output, output)
def test_count_single_sample(self):
layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
input_array = np.array([1, 2, 3, 1])
expected_output = np.array([0, 2, 1, 1])
output = layer(input_array)
self.assertAllClose(expected_output, output)
def test_count_batched_samples(self):
layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
expected_output = np.array([[0, 2, 1, 1], [2, 1, 0, 1]])
output = layer(input_array)
self.assertAllClose(expected_output, output)
| CategoryEncodingTest |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 49626,
"end": 54146
} | class ____(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.sequence_summary = ElectraSequenceSummary(config)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
discriminator_hidden_states = self.electra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = discriminator_hidden_states[0]
pooled_output = self.sequence_summary(sequence_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=discriminator_hidden_states.hidden_states,
attentions=discriminator_hidden_states.attentions,
)
@auto_docstring(
custom_intro="""
ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| ElectraForMultipleChoice |
python | celery__celery | t/unit/app/test_app.py | {
"start": 60881,
"end": 61843
} | class ____:
def test_platform_python_implementation(self):
with conftest.platform_pyimp(lambda: 'Xython'):
assert pyimplementation() == 'Xython'
def test_platform_jython(self):
with conftest.platform_pyimp():
with conftest.sys_platform('java 1.6.51'):
assert 'Jython' in pyimplementation()
def test_platform_pypy(self):
with conftest.platform_pyimp():
with conftest.sys_platform('darwin'):
with conftest.pypy_version((1, 4, 3)):
assert 'PyPy' in pyimplementation()
with conftest.pypy_version((1, 4, 3, 'a4')):
assert 'PyPy' in pyimplementation()
def test_platform_fallback(self):
with conftest.platform_pyimp():
with conftest.sys_platform('darwin'):
with conftest.pypy_version():
assert 'CPython' == pyimplementation()
| test_pyimplementation |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 5307,
"end": 6296
} | class ____(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, config):
super().__init__()
embedding_dim = config.d_model // 2
self.row_embeddings = nn.Embedding(50, embedding_dim)
self.column_embeddings = nn.Embedding(50, embedding_dim)
def forward(self, pixel_values, pixel_mask=None):
height, width = pixel_values.shape[-2:]
width_values = torch.arange(width, device=pixel_values.device)
height_values = torch.arange(height, device=pixel_values.device)
x_emb = self.column_embeddings(width_values)
y_emb = self.row_embeddings(height_values)
pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
pos = pos.permute(2, 0, 1)
pos = pos.unsqueeze(0)
pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
return pos
| MMGroundingDinoLearnedPositionEmbedding |
python | simonw__datasette | datasette/utils/asgi.py | {
"start": 5445,
"end": 6360
} | class ____:
def __init__(self, stream_fn, status=200, headers=None, content_type="text/plain"):
self.stream_fn = stream_fn
self.status = status
self.headers = headers or {}
self.content_type = content_type
async def asgi_send(self, send):
# Remove any existing content-type header
headers = {k: v for k, v in self.headers.items() if k.lower() != "content-type"}
headers["content-type"] = self.content_type
await send(
{
"type": "http.response.start",
"status": self.status,
"headers": [
[key.encode("utf-8"), value.encode("utf-8")]
for key, value in headers.items()
],
}
)
w = AsgiWriter(send)
await self.stream_fn(w)
await send({"type": "http.response.body", "body": b""})
| AsgiStream |
python | huggingface__transformers | src/transformers/models/funnel/modeling_funnel.py | {
"start": 43244,
"end": 46166
} | class ____(FunnelPreTrainedModel):
_tied_weights_keys = {"lm_head.weight": "funnel.embeddings.word_embeddings.weight"}
def __init__(self, config: FunnelConfig) -> None:
super().__init__(config)
self.funnel = FunnelModel(config)
self.lm_head = nn.Linear(config.d_model, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self) -> nn.Linear:
return self.lm_head
def set_output_embeddings(self, new_embeddings: nn.Embedding) -> None:
self.lm_head = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.funnel(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = outputs[0]
prediction_logits = self.lm_head(last_hidden_state)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the
first timestep of the last hidden state) e.g. for GLUE tasks.
"""
)
| FunnelForMaskedLM |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 15458,
"end": 15598
} | class ____(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
| TestDSTIIIFloat |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/custom_io_manager.py | {
"start": 350,
"end": 897
} | class ____(dg.ConfigurableIOManager):
# specifies an optional string list input, via config system
path_prefix: list[str] = []
def _get_path(self, context) -> str:
return "/".join(self.path_prefix + context.asset_key.path)
def handle_output(self, context: dg.OutputContext, obj):
write_csv(self._get_path(context), obj)
def load_input(self, context: dg.InputContext):
return read_csv(self._get_path(context))
# end_io_manager_marker
# start_io_manager_factory_marker
import dagster as dg
| MyIOManager |
python | h5py__h5py | h5py/_hl/group.py | {
"start": 30297,
"end": 31049
} | class ____:
"""
Represents an HDF5 external link. Paths may be absolute or relative.
No checking is performed to ensure either the target or file exists.
"""
@property
def path(self):
""" Soft link path, i.e. the part inside the HDF5 file. """
return self._path
@property
def filename(self):
""" Path to the external HDF5 file in the filesystem. """
return self._filename
def __init__(self, filename, path):
self._filename = filename_decode(filename_encode(filename))
self._path = path
def __repr__(self):
return '<ExternalLink to "%s" in file "%s"' % (self.path,
self.filename)
| ExternalLink |
python | sympy__sympy | sympy/plotting/pygletplot/plot_interval.py | {
"start": 152,
"end": 5431
} | class ____:
"""
"""
_v, _v_min, _v_max, _v_steps = None, None, None, None
def require_all_args(f):
def check(self, *args, **kwargs):
for g in [self._v, self._v_min, self._v_max, self._v_steps]:
if g is None:
raise ValueError("PlotInterval is incomplete.")
return f(self, *args, **kwargs)
return check
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], PlotInterval):
self.fill_from(args[0])
return
elif isinstance(args[0], str):
try:
args = eval(args[0])
except TypeError:
s_eval_error = "Could not interpret string %s."
raise ValueError(s_eval_error % (args[0]))
elif isinstance(args[0], (tuple, list)):
args = args[0]
else:
raise ValueError("Not an interval.")
if not isinstance(args, (tuple, list)) or len(args) > 4:
f_error = "PlotInterval must be a tuple or list of length 4 or less."
raise ValueError(f_error)
args = list(args)
if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)):
self.v = args.pop(0)
if len(args) in [2, 3]:
self.v_min = args.pop(0)
self.v_max = args.pop(0)
if len(args) == 1:
self.v_steps = args.pop(0)
elif len(args) == 1:
self.v_steps = args.pop(0)
def get_v(self):
return self._v
def set_v(self, v):
if v is None:
self._v = None
return
if not isinstance(v, Symbol):
raise ValueError("v must be a SymPy Symbol.")
self._v = v
def get_v_min(self):
return self._v_min
def set_v_min(self, v_min):
if v_min is None:
self._v_min = None
return
try:
self._v_min = sympify(v_min)
float(self._v_min.evalf())
except TypeError:
raise ValueError("v_min could not be interpreted as a number.")
def get_v_max(self):
return self._v_max
def set_v_max(self, v_max):
if v_max is None:
self._v_max = None
return
try:
self._v_max = sympify(v_max)
float(self._v_max.evalf())
except TypeError:
raise ValueError("v_max could not be interpreted as a number.")
def get_v_steps(self):
return self._v_steps
def set_v_steps(self, v_steps):
if v_steps is None:
self._v_steps = None
return
if isinstance(v_steps, int):
v_steps = Integer(v_steps)
elif not isinstance(v_steps, Integer):
raise ValueError("v_steps must be an int or SymPy Integer.")
if v_steps <= S.Zero:
raise ValueError("v_steps must be positive.")
self._v_steps = v_steps
@require_all_args
def get_v_len(self):
return self.v_steps + 1
v = property(get_v, set_v)
v_min = property(get_v_min, set_v_min)
v_max = property(get_v_max, set_v_max)
v_steps = property(get_v_steps, set_v_steps)
v_len = property(get_v_len)
def fill_from(self, b):
if b.v is not None:
self.v = b.v
if b.v_min is not None:
self.v_min = b.v_min
if b.v_max is not None:
self.v_max = b.v_max
if b.v_steps is not None:
self.v_steps = b.v_steps
@staticmethod
def try_parse(*args):
"""
Returns a PlotInterval if args can be interpreted
as such, otherwise None.
"""
if len(args) == 1 and isinstance(args[0], PlotInterval):
return args[0]
try:
return PlotInterval(*args)
except ValueError:
return None
def _str_base(self):
return ",".join([str(self.v), str(self.v_min),
str(self.v_max), str(self.v_steps)])
def __repr__(self):
"""
A string representing the interval in class constructor form.
"""
return "PlotInterval(%s)" % (self._str_base())
def __str__(self):
"""
A string representing the interval in list form.
"""
return "[%s]" % (self._str_base())
@require_all_args
def assert_complete(self):
pass
@require_all_args
def vrange(self):
"""
Yields v_steps+1 SymPy numbers ranging from
v_min to v_max.
"""
d = (self.v_max - self.v_min) / self.v_steps
for i in range(self.v_steps + 1):
a = self.v_min + (d * Integer(i))
yield a
@require_all_args
def vrange2(self):
"""
Yields v_steps pairs of SymPy numbers ranging from
(v_min, v_min + step) to (v_max - step, v_max).
"""
d = (self.v_max - self.v_min) / self.v_steps
a = self.v_min + (d * S.Zero)
for i in range(self.v_steps):
b = self.v_min + (d * Integer(i + 1))
yield a, b
a = b
def frange(self):
for i in self.vrange():
yield float(i.evalf())
| PlotInterval |
python | pola-rs__polars | py-polars/tests/unit/constructors/test_constructors.py | {
"start": 1480,
"end": 1563
} | class ____:
d: datetime
e: float
f: str
@dataclasses.dataclass
| _TestBazDC |
python | realpython__materials | build-a-gui-with-wxpython/mp3_tag_editor.py | {
"start": 1531,
"end": 3517
} | class ____(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
main_sizer = wx.BoxSizer(wx.VERTICAL)
self.row_obj_dict = {}
self.current_folder_path = None
self.list_ctrl = wx.ListCtrl(
self, size=(-1, 100), style=wx.LC_REPORT | wx.BORDER_SUNKEN
)
self.list_ctrl.InsertColumn(0, "Artist", width=140)
self.list_ctrl.InsertColumn(1, "Album", width=140)
self.list_ctrl.InsertColumn(2, "Title", width=200)
self.list_ctrl.InsertColumn(3, "Year", width=200)
main_sizer.Add(self.list_ctrl, 0, wx.ALL | wx.EXPAND, 5)
edit_button = wx.Button(self, label="Edit")
edit_button.Bind(wx.EVT_BUTTON, self.on_edit)
main_sizer.Add(edit_button, 0, wx.ALL | wx.CENTER, 5)
self.SetSizer(main_sizer)
def on_edit(self, event):
selection = self.list_ctrl.GetFocusedItem()
if selection >= 0:
mp3 = self.row_obj_dict[selection]
dlg = EditDialog(mp3)
dlg.ShowModal()
self.update_mp3_listing(self.current_folder_path)
dlg.Destroy()
def update_mp3_listing(self, folder_path):
self.current_folder_path = folder_path
self.list_ctrl.ClearAll()
self.list_ctrl.InsertColumn(0, "Artist", width=140)
self.list_ctrl.InsertColumn(1, "Album", width=140)
self.list_ctrl.InsertColumn(2, "Title", width=200)
self.list_ctrl.InsertColumn(3, "Year", width=200)
mp3s = glob.glob(folder_path + "/*.mp3")
mp3_objects = []
index = 0
for mp3 in mp3s:
mp3_object = eyed3.load(mp3)
self.list_ctrl.InsertItem(index, mp3_object.tag.artist)
self.list_ctrl.SetItem(index, 1, mp3_object.tag.album)
self.list_ctrl.SetItem(index, 2, mp3_object.tag.title)
mp3_objects.append(mp3_object)
self.row_obj_dict[index] = mp3_object
index += 1
| Mp3Panel |
python | scrapy__scrapy | tests/test_commands.py | {
"start": 2037,
"end": 2890
} | class ____:
"""A base class for tests that may need a Scrapy project."""
project_name = "testproject"
@pytest.fixture(scope="session")
def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:
"""Create a Scrapy project in a temporary directory and return its path.
Used as a cache for ``proj_path``.
"""
tmp_path = tmp_path_factory.mktemp("proj")
call("startproject", self.project_name, cwd=tmp_path)
return tmp_path / self.project_name
@pytest.fixture
def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:
"""Copy a pre-generated Scrapy project into a temporary directory and return its path."""
proj_path = tmp_path / self.project_name
copytree(_proj_path_cached, proj_path)
return proj_path
| TestProjectBase |
python | pytorch__pytorch | torch/distributed/optim/named_optimizer.py | {
"start": 408,
"end": 13974
} | class ____(optim.Optimizer):
"""
``_NamedOptimizer`` takes a dict of parameters and exposes ``state_dict`` by parameter key.
We replace the original key (number) in an optim to the
fully qualified name (FQN) string. User can initialize the optim as they
initialize a PyTorch optim, the only difference is that they also need to
pass in the FQN of each parameters.
Args:
named_parameters (Mapping[str, Union[torch.Tensor, ShardedTensor]]):
Mapping from FQN to parameter.
optimizer_class (optim.Optimizer):
The class of optimizer to instantiate.
param_groups (Collection[Mapping[str, Any]]):
`param_groups` to pass to optimizer if specified.
The key of the inner map needs to be FQNs.
Default: None
module (nn.Module): the module whose parameters to updated
by the optimizer.
args: arguments to pass to the optimizer constructor.
kwargs: arguments to pass to the optimizer constructor.
Example::
>>> # xdoctest: +SKIP("distributed")
>>> from torch import optim
>>> from torch.distributed.optim import _NamedOptimizer
>>>
>>> # Define the named optimizer.
>>> m = Model(...)
>>> named_optim = _NamedOptimizer(m.named_parameters(), optim.SGD)
>>> # Forward pass + backward pass.
>>> named_optim.step()
>>> ...
>>> # Call state_dict for the named optimizer returns a FQN state_dict.
>>> named_optim.state_dict()
Warning: This API is still in development and subject to change.
TODO: Add tutorial for _NamedOptimizer.
TODO: Add documentation in the docstring for the public attributes
like self.param_groups and self.named_parameters.
"""
def __init__(
self,
named_parameters: Mapping[str, torch.Tensor | ShardedTensor],
optimizer_class: optim.Optimizer,
param_groups: Collection[Mapping[str, Any]] | None = None,
module: nn.Module | None = None,
*args: tuple[Any, ...],
**kwargs: dict[str, Any],
) -> None:
torch._C._log_api_usage_once("torch.distributed.optim._NamedOptimizer")
self.param_groups: Collection[Mapping[str, Any]] = param_groups # type: ignore[assignment]
self._param_groups_check()
self.named_parameters = dict(named_parameters)
params_for_optimizer = (
self.named_parameters.values() if param_groups is None else param_groups
)
self._optimizer = optimizer_class( # type: ignore[operator]
params_for_optimizer,
*args,
**kwargs,
)
self.module = module
if param_groups is None:
self.ordered_param_keys = list(self.named_parameters.keys())
else:
warnings.warn(
"Since we pass in param_groups, we will use param_groups to "
"initialize the optimizer, not all parameters of the module.",
stacklevel=2,
)
param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type]
ordered_param_keys = []
for group in param_groups:
for param in group["params"]:
if param not in param_to_key:
raise ValueError(
f"Expect param name {param} found in param group but is missing."
)
ordered_param_keys.append(param_to_key[param])
self.ordered_param_keys = ordered_param_keys
# Update param_groups from optimizer.
self.param_groups = self._optimizer.param_groups
def _param_groups_check(self) -> None:
if self.param_groups is not None:
for param_group in self.param_groups:
assert isinstance(param_group, dict), "param group must be a dict"
assert "params" in param_group, "param group must contain key params"
params = param_group["params"]
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
for param in params:
if not isinstance(param, torch.Tensor):
raise TypeError(
"optimizer can only optimize Tensors, "
"but one of the params is " + torch.typename(param)
)
param_group["params"] = params
def state_dict(self) -> dict[str, Any]:
"""
Return the ``state_dict`` of the optimizer.
Instead of using number to index
parameters, we will use module fully qualified name (FQN) as the key.
"""
state_dict = self._optimizer.state_dict()
param_groups = state_dict["param_groups"]
ret_state = {
self.ordered_param_keys[st_key]: state_val
for st_key, state_val in state_dict["state"].items()
}
ret_groups = []
for group in param_groups:
param_keys = [self.ordered_param_keys[param] for param in group["params"]]
ret_group = {"params": sorted(param_keys)}
for k, v in group.items():
if k != "params":
ret_group[k] = deepcopy(v)
ret_groups.append(ret_group)
return self._post_state_dict({"state": ret_state, "param_groups": ret_groups})
@overload
def step(self, closure: None = None) -> None: ...
@overload
def step(self, closure: Callable[[], float]) -> float: ...
def step(self, closure: Callable[[], float] | None = None) -> float | None:
"""
Perform a single optimization step.
This will call :meth:`torch.optim.Optimizer.step` on the wrapped
optimizer.
"""
return self._optimizer.step(closure=closure)
@property
def state(self) -> Mapping[torch.Tensor, Any]: # type: ignore[override]
return self._optimizer.state
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""
Define the default behavior to load a state_dict for ``_NamedOptimizer``.
Sample Code
```
my_model = MyModule()
optimizer = _NamedOptimizer(my_model.named_parameters(), Adagrad)
...
optim_state_dict = optimizer.state_dict()
...
...
optimizer.load_state_dict(optim_state_dict)
...
```
Args:
state_dict (dict[str, Any]) : A ``state_dict`` to load into the optimizer.
Note that this state dict update is performed in place.
.. note:: PyTorch is using lazy init to initialize the optim states.
So it is possible that there is no optim state when user call
``load_state_dict`` and for ``_NamedOptimizer`` we make it stricter
that users can only call ``load_state_dict`` after the state is initialized.
By doing this, we can validate the optim ``state_dict`` to be loaded.
"""
new_state_dict = self._optimizer.state_dict()
state_dict = self._pre_load_state_dict(state_dict)
state = state_dict["state"]
new_state = new_state_dict["state"]
if len(new_state) == 0:
raise ValueError(
"Expects the optim to be initialized before load but found not initialized."
)
for idx, param_key in enumerate(self.ordered_param_keys):
# When the conditional training is performed, not all parameters are updated in the optim.
if param_key not in state:
continue
if len(state[param_key]) != len(new_state[idx]):
raise ValueError(
f"Expects equal length as {len(new_state[idx])} for parameter {param_key} but found: {len(state[param_key])}"
)
# Iterate through all optimizer states.
for state_key, state_val in new_state[idx].items():
if state_key not in state[param_key]:
raise ValueError(
f"Expects state {state_key} for parameter {param_key} but not found."
)
src_state_val = state[param_key][state_key]
if isinstance(state_val, ShardedTensor):
assert isinstance(src_state_val, ShardedTensor)
num_shards = len(state_val.local_shards())
num_new_shards = len(src_state_val.local_shards())
if num_shards != num_new_shards:
raise ValueError(
f"Expects equal number of shards as {num_new_shards} but found {num_shards} for {param_key}/{state_key}"
)
for shard, src_shard in zip(
state_val.local_shards(), src_state_val.local_shards()
):
shard.tensor.detach().copy_(src_shard.tensor)
elif isinstance(state_val, torch.Tensor):
assert isinstance(src_state_val, torch.Tensor)
state_val.detach().copy_(src_state_val)
else:
new_state[idx][state_key] = deepcopy(src_state_val)
# Load param_groups of state_dict
src_param_groups = state_dict["param_groups"]
new_param_groups = new_state_dict["param_groups"]
src_group_map = {}
for group in src_param_groups:
param_keys = list(group["params"])
src_group_map[_gen_param_group_key(param_keys)] = group
new_group_map = {}
for new_group in new_param_groups:
param_keys = []
for param_key in new_group["params"]:
param_keys.append(self.ordered_param_keys[param_key]) # type: ignore[call-overload]
new_group_map[_gen_param_group_key(param_keys)] = new_group
for group_key, new_group in new_group_map.items():
# When not all parameters are used in training or receive gradient, aka., not all parameters
# would be in the param_group. Thus we skip the group_key here.
if group_key not in src_group_map:
continue
src_group = src_group_map[group_key]
if len(src_group) != len(new_group):
raise ValueError(
f"Expects equal param_group size as {len(new_group)} for group {group_key} but found {len(src_group)}."
)
for k in src_group:
if k not in new_group:
raise ValueError(
f"Expects group key {k} to be in group {group_key} in `state_dict` but is missing."
)
if k != "params":
new_group[k] = deepcopy(src_group[k])
self._optimizer.load_state_dict(new_state_dict)
def add_param_group(self, param_group: Mapping[str, Any]) -> None:
"""
Add a param group to the :class:`_NamedOptimizer` s `param_groups`.
Warning: This API is still in development and subject to change.
"""
assert isinstance(param_group, dict), "param group must be a dict"
params = param_group["params"]
if isinstance(params, torch.Tensor):
param_group["params"] = [params]
else:
param_group["params"] = list(params)
param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type]
for param in param_group["params"]:
if param not in param_to_key:
raise ValueError("some parameters are not in the module")
self.ordered_param_keys.append(param_to_key[param])
self._optimizer.add_param_group(param_group)
# Update param_groups from optimizer.
self.param_groups = self._optimizer.param_groups
def init_state(self) -> None:
"""
Run a dummy optimizer step, which allows to initialize optimizer state because we do lazy init for most optimizers.
This allows doing in-place loading of optimizer state from a checkpoint.
"""
for param in self.named_parameters.values():
if param.requires_grad:
t = torch.zeros_like(param)
param.grad = torch.autograd.Variable(t)
# Calling ``step`` will load the initial state for optimizer states.
self.step(closure=None)
def _pre_load_state_dict(self, state_dict: dict[str, Any]) -> dict[str, Any]:
# TODO(chienchin): This API should be FSDP agnostic and should support
# general user hooks.
if isinstance(self.module, FSDP):
return FSDP.optim_state_dict_to_load(
self.module, self._optimizer, state_dict, is_named_optimizer=True
)
return state_dict
def _post_state_dict(self, state_dict: dict[str, Any]) -> dict[str, Any]:
# TODO(chienchin): This API should be FSDP agnostic and should support
# general user hooks.
if isinstance(self.module, FSDP):
FSDP.optim_state_dict(self.module, self._optimizer, state_dict)
return state_dict
def _gen_param_group_key(param_keys: list[str]) -> str:
"""Concatenate all param keys as a unique identifier for one param group."""
return "/".join(sorted(param_keys))
| _NamedOptimizer |
python | nedbat__coveragepy | tests/test_coverage.py | {
"start": 2259,
"end": 3630
} | class ____(CoverageTest):
"""The simplest tests, for quick smoke testing of fundamental changes."""
def test_simple(self) -> None:
self.check_coverage(
"""\
a = 1
b = 2
c = 4
# Nothing here
d = 6
""",
lines=[1, 2, 4, 6],
report="4 0 0 0 100%",
)
def test_indentation_wackiness(self) -> None:
# Partial final lines are OK.
self.check_coverage(
"""\
import sys
if not sys.path:
a = 1
""", # indented last line
lines=[1, 2, 3],
missing="3",
)
def test_multiline_initializer(self) -> None:
self.check_coverage(
"""\
d = {
'foo': 1+2,
'bar': (lambda x: x+1)(1),
'baz': str(1),
}
e = { 'foo': 1, 'bar': 2 }
""",
lines=[1, 7],
missing="",
)
def test_list_comprehension(self) -> None:
self.check_coverage(
"""\
l = [
2*i for i in range(10)
if i > 5
]
assert l == [12, 14, 16, 18]
""",
lines=[1, 5],
missing="",
)
| BasicCoverageTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_responses/items_response_builder.py | {
"start": 163,
"end": 401
} | class ____(HttpResponseBuilder):
@classmethod
def items_response(cls) -> "ItemsResponseBuilder":
return cls(find_template("items", __file__), NestedPath(["data", "boards", 0, "items_page", "items"]), None)
| ItemsResponseBuilder |
python | getsentry__sentry | tests/sentry/rules/processing/test_processor.py | {
"start": 1687,
"end": 1940
} | class ____(EventCondition):
id = "tests.sentry.rules.processing.test_processor.MockConditionTrue"
label = "Mock condition which always passes."
def passes(self, event, state) -> bool:
return True
@mock_redis_buffer()
| MockConditionTrue |
python | ray-project__ray | python/ray/data/_internal/actor_autoscaler/autoscaling_actor_pool.py | {
"start": 1036,
"end": 3305
} | class ____(ABC):
"""Abstract interface of an autoscaling actor pool.
A `PhysicalOperator` can manage one or more `AutoscalingActorPool`s.
`Autoscaler` is responsible for deciding autoscaling of these actor
pools.
"""
@abstractmethod
def min_size(self) -> int:
"""Min size of the actor pool."""
...
@abstractmethod
def max_size(self) -> int:
"""Max size of the actor pool."""
...
@abstractmethod
def current_size(self) -> int:
"""Current size of the actor pool."""
...
@abstractmethod
def num_running_actors(self) -> int:
"""Number of running actors."""
...
@abstractmethod
def num_active_actors(self) -> int:
"""Number of actors with at least one active task."""
...
@abstractmethod
def num_pending_actors(self) -> int:
"""Number of actors pending creation."""
...
@abstractmethod
def max_tasks_in_flight_per_actor(self) -> int:
"""Max number of in-flight tasks per actor."""
...
@abstractmethod
def max_actor_concurrency(self) -> int:
"""Returns max number of tasks single actor could run concurrently."""
...
@abstractmethod
def num_tasks_in_flight(self) -> int:
"""Number of current in-flight tasks (ie total nubmer of tasks that have been
submitted to the actor pool)."""
...
def num_free_task_slots(self) -> int:
"""Number of free slots to run tasks.
This doesn't include task slots for pending actors.
"""
return (
self.max_tasks_in_flight_per_actor() * self.num_running_actors()
- self.num_tasks_in_flight()
)
@abstractmethod
def scale(self, req: ActorPoolScalingRequest):
"""Applies autoscaling action"""
...
@abstractmethod
def per_actor_resource_usage(self) -> ExecutionResources:
"""Per actor resource usage."""
...
@abstractmethod
def get_pool_util(self) -> float:
"""Calculate the utilization of the given actor pool."""
...
def max_concurrent_tasks(self) -> int:
return self.max_actor_concurrency() * self.num_running_actors()
| AutoscalingActorPool |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks_tests/test_databricks_pyspark_step_launcher.py | {
"start": 1241,
"end": 3867
} | class ____:
def test_given_add_dagster_env_vars_retrieves_dagster_system_vars(
self, mock_step_launcher_factory, monkeypatch
):
test_env_variables = {"add": "this"}
test_launcher = mock_step_launcher_factory(
add_dagster_env_variables=True,
env_variables=test_env_variables,
databricks_token="abc123",
)
system_vars = {}
for var in DAGSTER_SYSTEM_ENV_VARS:
system_vars[var] = f"{var}_value"
monkeypatch.setenv(var, f"{var}_value")
correct_vars = dict(**system_vars, **test_env_variables)
env_vars = test_launcher.create_remote_config()
assert env_vars.env_variables == correct_vars
def test_given_no_add_dagster_env_vars_no_system_vars_added(
self, mock_step_launcher_factory, monkeypatch
):
vars_to_add = {"add": "this"}
test_launcher = mock_step_launcher_factory(
add_dagster_env_variables=False, env_variables=vars_to_add, databricks_token="abc123"
)
for var in DAGSTER_SYSTEM_ENV_VARS:
monkeypatch.setenv(var, f"{var}_value")
env_vars = test_launcher.create_remote_config()
assert env_vars.env_variables == vars_to_add
def test_given_no_dagster_system_vars_none_added(self, mock_step_launcher_factory):
vars_to_add = {"add": "this"}
test_launcher = mock_step_launcher_factory(
add_dagster_env_variables=True, env_variables=vars_to_add, databricks_token="abc123"
)
for var in DAGSTER_SYSTEM_ENV_VARS:
assert not os.getenv(var)
env_vars = test_launcher.create_remote_config()
assert env_vars.env_variables == vars_to_add
@mock.patch("dagster_databricks.databricks.Config")
def test_given_bad_config_raises_ValueError(
self, mock_workspace_client_config, mock_step_launcher_factory
):
with pytest.raises(
ValueError,
match=(
"If using databricks service principal oauth credentials, both oauth_client_id and"
" oauth_client_secret must be provided"
),
):
mock_step_launcher_factory(
oauth_creds={"client_id": "abc123"},
)
with pytest.raises(
ValueError,
match=(
"If using azure service principal auth, azure_client_id, azure_client_secret, and"
" azure_tenant_id must be provided"
),
):
mock_step_launcher_factory(azure_creds={"azure_client_id": "abc123"})
| TestCreateRemoteConfig |
python | pytorch__pytorch | torch/nn/modules/instancenorm.py | {
"start": 313,
"end": 4167
} | class ____(_NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
def _check_input_dim(self, input):
raise NotImplementedError
def _get_no_batch_dim(self):
raise NotImplementedError
def _handle_no_batch_input(self, input):
return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)
def _apply_instance_norm(self, input):
return F.instance_norm(
input,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training or not self.track_running_stats,
self.momentum if self.momentum is not None else 0.0,
self.eps,
)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
) -> None:
version = local_metadata.get("version", None)
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ("running_mean", "running_var"):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
"Unexpected running stats buffer(s) {names} for {klass} "
"with track_running_stats=False. If state_dict is a "
"checkpoint saved before 0.4.0, this may be expected "
"because {klass} does not track running stats by default "
"since 0.4.0. Please remove these keys from state_dict. If "
"the running stats are actually needed, instead set "
"track_running_stats=True in {klass} to enable them. See "
"the documentation of {klass} for details.".format(
names=" and ".join(f'"{k}"' for k in running_stats_keys),
klass=self.__class__.__name__,
)
)
for key in running_stats_keys:
state_dict.pop(key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
feature_dim = input.dim() - self._get_no_batch_dim()
if input.size(feature_dim) != self.num_features:
if self.affine:
raise ValueError(
f"expected input's size at dim={feature_dim} to match num_features"
f" ({self.num_features}), but got: {input.size(feature_dim)}."
)
else:
warnings.warn(
f"input's size at dim={feature_dim} does not match num_features. "
"You can silence this warning by not passing in num_features, "
"which is not used because affine=False",
stacklevel=2,
)
if input.dim() == self._get_no_batch_dim():
return self._handle_no_batch_input(input)
return self._apply_instance_norm(input)
| _InstanceNorm |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr_resnet.py | {
"start": 11346,
"end": 12508
} | class ____(PreTrainedModel):
config: RTDetrResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["RTDetrResNetConvLayer", "RTDetrResNetShortCut"]
@torch.no_grad()
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
# copied from the `reset_parameters` method of `class Linear(Module)` in `torch`.
elif isinstance(module, nn.Linear):
init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
init.constant_(module.weight, 1)
init.constant_(module.bias, 0)
@auto_docstring(
custom_intro="""
ResNet backbone, to be used with frameworks like RTDETR.
"""
)
| RTDetrResNetPreTrainedModel |
python | boto__boto3 | boto3/exceptions.py | {
"start": 1233,
"end": 1764
} | class ____(
Boto3Error, botocore.exceptions.DataNotFoundError
):
def __init__(self, service_name, bad_api_version, available_api_versions):
msg = (
f"The '{service_name}' resource does not support an API version of: {bad_api_version}\n"
f"Valid API versions are: {available_api_versions}"
)
# Not using super because we don't want the DataNotFoundError
# to be called, it has a different __init__ signature.
Boto3Error.__init__(self, msg)
| UnknownAPIVersionError |
python | django__django | tests/backends/models.py | {
"start": 2419,
"end": 2489
} | class ____(Reporter):
class Meta:
proxy = True
| ReporterProxy |
python | weaviate__weaviate-python-client | weaviate/collections/iterator.py | {
"start": 2794,
"end": 4622
} | class ____(
Generic[TProperties, TReferences],
AsyncIterable[Object[TProperties, TReferences]],
):
def __init__(
self,
query: _FetchObjectsQueryAsync[Any, Any],
inputs: _IteratorInputs[TProperties, TReferences],
cache_size: Optional[int] = None,
) -> None:
self.__query = query
self.__inputs = inputs
self.__iter_object_cache: List[Object[TProperties, TReferences]] = []
self.__iter_object_last_uuid: Optional[UUID] = _parse_after(self.__inputs.after)
self.__iter_cache_size = cache_size or ITERATOR_CACHE_SIZE
def __aiter__(
self,
) -> AsyncIterator[Object[TProperties, TReferences]]:
self.__iter_object_cache = []
self.__iter_object_last_uuid = _parse_after(self.__inputs.after)
return self
async def __anext__(
self,
) -> Object[TProperties, TReferences]:
if len(self.__iter_object_cache) == 0:
res = await self.__query.fetch_objects(
limit=self.__iter_cache_size,
after=self.__iter_object_last_uuid,
include_vector=self.__inputs.include_vector,
return_metadata=self.__inputs.return_metadata,
return_properties=self.__inputs.return_properties,
return_references=self.__inputs.return_references,
)
self.__iter_object_cache = res.objects # type: ignore
if len(self.__iter_object_cache) == 0:
raise StopAsyncIteration
ret_object = self.__iter_object_cache.pop(0)
self.__iter_object_last_uuid = ret_object.uuid
assert (
self.__iter_object_last_uuid is not None
) # if this is None the iterator will never stop
return ret_object # pyright: ignore
| _ObjectAIterator |
python | great-expectations__great_expectations | tests/scripts/test_public_api_report.py | {
"start": 20959,
"end": 27557
} | class ____:
def test_instantiate(self, code_reference_filter: CodeReferenceFilter):
assert code_reference_filter.excludes
assert code_reference_filter.includes
def test_instantiate_with_non_default_include_exclude(
self,
code_reference_filter_with_non_default_include_exclude: CodeReferenceFilter,
):
code_reference_filter = code_reference_filter_with_non_default_include_exclude
assert code_reference_filter.excludes
assert code_reference_filter.includes
assert len(code_reference_filter.excludes) == 1
assert len(code_reference_filter.includes) == 1
def test_filter_definitions_no_include_exclude(
self, code_reference_filter_with_no_include_exclude: CodeReferenceFilter
):
observed = code_reference_filter_with_no_include_exclude.filter_definitions()
assert len(observed) == 6
assert {d.name for d in observed} == {
"ExampleClass",
# "__init__", # Filtered private methods
# "_example_private_method", # Filtered private methods
# "_example_private_module_level_function", # Filtered private methods
"example_classmethod",
"example_method",
"example_method_with_args",
"example_module_level_function",
"example_staticmethod",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def test_filter_definitions_with_references_from_docs_content(
self,
code_reference_filter_with_references_from_docs_content: CodeReferenceFilter,
):
observed = code_reference_filter_with_references_from_docs_content.filter_definitions()
assert len(observed) == 1
assert {d.name for d in observed} == {"ExampleClass"}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def test_filter_definitions_exclude_by_file(
self, code_reference_filter_with_exclude_by_file: CodeReferenceFilter
):
observed = code_reference_filter_with_exclude_by_file.filter_definitions()
assert len(observed) == 0
assert {d.name for d in observed} == set()
assert {d.filepath for d in observed} == set()
def test_filter_definitions_exclude_by_file_and_name(
self, code_reference_filter_with_exclude_by_file_and_name: CodeReferenceFilter
):
observed = code_reference_filter_with_exclude_by_file_and_name.filter_definitions()
assert len(observed) == 4
assert {d.name for d in observed} == {
"ExampleClass",
"example_classmethod",
"example_method_with_args",
"example_staticmethod",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def test_filter_definitions_include_by_file_and_name_already_included(
self,
code_reference_filter_with_include_by_file_and_name_already_included: CodeReferenceFilter,
):
"""What does this test and why?
That include directives that try to include already included definitions
will not include multiple copies of the same definitions (when not
accounting for different but equivalent ast definition object instances).
"""
observed = code_reference_filter_with_include_by_file_and_name_already_included.filter_definitions() # noqa: E501 # FIXME CoP
# There are two extra (8 vs 6) here due to the ast_definition classes
# pointing to different but equivalent objects.
assert len(observed) == 8
assert {d.name for d in observed} == {
"ExampleClass",
"example_classmethod",
"example_method",
"example_method_with_args",
"example_module_level_function",
"example_staticmethod",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def test_filter_definitions_include_by_file_and_name_already_excluded(
self,
code_reference_filter_with_include_by_file_and_name_already_excluded: CodeReferenceFilter,
):
"""What does this test and why?
Include overrides exclude.
"""
observed = code_reference_filter_with_include_by_file_and_name_already_excluded.filter_definitions() # noqa: E501 # FIXME CoP
# There are two extra (4 vs 2) here due to the ast_definition classes
# pointing to different but equivalent objects.
assert len(observed) == 4
assert {d.name for d in observed} == {
"example_method",
"example_module_level_function",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
def test_filter_definitions_include_by_file_and_name_already_excluded_not_used_in_docs_example(
self,
code_reference_filter_with_include_by_file_and_name_not_used_in_docs_example_exclude_file: CodeReferenceFilter, # noqa: E501 # FIXME CoP
):
"""What does this test and why?
Include overrides exclude. Method that was not included in docs examples
is still included if manually added.
"""
observed = code_reference_filter_with_include_by_file_and_name_not_used_in_docs_example_exclude_file.filter_definitions() # noqa: E501 # FIXME CoP
assert len(observed) == 1
assert {d.name for d in observed} == {
"example_no_usages_in_sample_docs_example_python_file_string",
}
assert {d.filepath for d in observed} == {
pathlib.Path("sample_with_definitions_python_file_string.py")
}
@pytest.fixture
def public_api_report(
code_reference_filter_with_no_include_exclude: CodeReferenceFilter,
repo_root: pathlib.Path,
) -> PublicAPIReport:
return PublicAPIReport(
definitions=code_reference_filter_with_no_include_exclude.filter_definitions(),
repo_root=repo_root,
)
@pytest.fixture
def public_api_report_filter_out_file(
code_reference_filter_with_exclude_by_file: CodeReferenceFilter,
repo_root: pathlib.Path,
) -> PublicAPIReport:
return PublicAPIReport(
definitions=code_reference_filter_with_exclude_by_file.filter_definitions(),
repo_root=repo_root,
)
| TestCodeReferenceFilter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.