language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Textualize__textual | src/textual/dom.py | {
"start": 3503,
"end": 4187
} | class ____:
"""A descriptor to manage the `classes` property."""
def __get__(
self, obj: DOMNode, objtype: type[DOMNode] | None = None
) -> frozenset[str]:
"""A frozenset of the current classes on the widget."""
return frozenset(obj._classes)
def __set__(self, obj: DOMNode, classes: str | Iterable[str]) -> None:
"""Replaces classes entirely."""
if isinstance(classes, str):
class_names = set(classes.split())
else:
class_names = set(classes)
check_identifiers("class name", *class_names)
obj._classes = class_names
obj._update_styles()
@rich.repr.auto
| _ClassesDescriptor |
python | pytorch__pytorch | torch/ao/quantization/quantizer/embedding_quantizer.py | {
"start": 1178,
"end": 3397
} | class ____(Quantizer):
@classmethod
def get_supported_quantization_configs(cls) -> list[QuantizationConfig]:
op_configs: set[QuantizationConfig] = {
spec for spec, _ in cls.get_supported_operators()
}
return list(op_configs)
@classmethod
def get_supported_operator_for_quantization_config(
cls, quantization_config: QuantizationConfig
) -> list[OperatorPatternType]:
for config, ops in cls.get_supported_operators():
# note: this assumes each entry in cls.supported_spec_and_operators
# corresponds to one spec, e.g. we don't have
# [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)]
# where the first and second entry have the same spec but did not
# merge the op list
if config == quantization_config:
return ops
return []
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
"""just handling global spec for now"""
self._annotate_embedding_ops(model.graph)
return model
def _annotate_embedding_ops(self, graph: torch.fx.Graph) -> None:
embedding_config: OperatorConfig = get_embedding_operators_config()
for node in graph.nodes:
# Keep node parsing based annotations instead of module partitioners
# just as an example of alternate ways of annotating
if (
node.op == "call_function"
and node.target is torch.ops.aten.embedding.default
):
if embedding_config.config.weight is None:
raise ValueError(
"Embedding config must have a valid weight quantization spec."
)
node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
node.args[0]: embedding_config.config.weight,
}
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
@classmethod
def get_supported_operators(cls) -> list[OperatorConfig]:
return [get_embedding_operators_config()]
| EmbeddingQuantizer |
python | numba__numba | numba/core/dispatcher.py | {
"start": 6015,
"end": 6462
} | class ____(object):
"""
A simple counter that increment in __enter__ and decrement in __exit__.
"""
def __init__(self):
self.counter = 0
def __enter__(self):
assert self.counter >= 0
self.counter += 1
def __exit__(self, *args, **kwargs):
self.counter -= 1
assert self.counter >= 0
def __bool__(self):
return self.counter > 0
__nonzero__ = __bool__
| CompilingCounter |
python | django__django | tests/forms_tests/tests/test_forms.py | {
"start": 233051,
"end": 237908
} | class ____(SimpleTestCase):
def test_renderer_custom_bound_field(self):
t = Template("{{ form }}")
html = t.render(Context({"form": Person()}))
expected = """
<div><label for="id_first_name">First name</label>
<input type="text" name="first_name" required
id="id_first_name"></div>
<div><label for="id_last_name">Last name</label>
<input type="text" name="last_name" required
id="id_last_name"></div><div>
<label for="id_birthday">Birthday</label>
<input type="text" name="birthday" required
id="id_birthday"></div>"""
self.assertHTMLEqual(html, expected)
def test_form_custom_boundfield(self):
class CustomBoundFieldPerson(Person):
bound_field_class = BoundFieldWithTwoColons
with self.subTest("form's BoundField takes over renderer's BoundField"):
t = Template("{{ form }}")
html = t.render(Context({"form": CustomBoundFieldPerson()}))
expected = """
<div><label for="id_first_name">First name::</label>
<input type="text" name="first_name" required
id="id_first_name"></div>
<div><label for="id_last_name">Last name::</label>
<input type="text" name="last_name" required
id="id_last_name"></div><div>
<label for="id_birthday">Birthday::</label>
<input type="text" name="birthday" required
id="id_birthday"></div>"""
self.assertHTMLEqual(html, expected)
with self.subTest("Constructor argument takes over class property"):
t = Template("{{ form }}")
html = t.render(
Context(
{
"form": CustomBoundFieldPerson(
bound_field_class=BoundFieldWithCustomClass
)
}
)
)
expected = """
<div><label class="custom-class" for="id_first_name">First name:</label>
<input type="text" name="first_name" required
id="id_first_name"></div>
<div><label class="custom-class" for="id_last_name">Last name:</label>
<input type="text" name="last_name" required
id="id_last_name"></div><div>
<label class="custom-class" for="id_birthday">Birthday:</label>
<input type="text" name="birthday" required
id="id_birthday"></div>"""
self.assertHTMLEqual(html, expected)
with self.subTest("Overriding css_classes works as expected"):
t = Template("{{ form }}")
html = t.render(
Context(
{
"form": CustomBoundFieldPerson(
bound_field_class=BoundFieldWithWrappingClass
)
}
)
)
expected = """
<div class="field-class"><label for="id_first_name">First name:</label>
<input type="text" name="first_name" required
id="id_first_name"></div>
<div class="field-class"><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" required
id="id_last_name"></div><div class="field-class">
<label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" required
id="id_birthday"></div>"""
self.assertHTMLEqual(html, expected)
def test_field_custom_bound_field(self):
class BoundFieldWithTwoColonsCharField(CharField):
bound_field_class = BoundFieldWithTwoColons
class CustomFieldBoundFieldPerson(Person):
bound_field_class = BoundField
first_name = BoundFieldWithTwoColonsCharField()
last_name = BoundFieldWithTwoColonsCharField(
bound_field_class=BoundFieldWithCustomClass
)
html = Template("{{ form }}").render(
Context({"form": CustomFieldBoundFieldPerson()})
)
expected = """
<div><label for="id_first_name">First name::</label>
<input type="text" name="first_name" required
id="id_first_name"></div>
<div><label class="custom-class" for="id_last_name">Last name:</label>
<input type="text" name="last_name" required
id="id_last_name"></div><div>
<label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" required
id="id_birthday"></div>"""
self.assertHTMLEqual(html, expected)
| CustomBoundFieldTest |
python | django-debug-toolbar__django-debug-toolbar | tests/models.py | {
"start": 281,
"end": 589
} | class ____(models.Model):
field = JSONField()
def __str__(self):
return ""
if settings.USE_GIS:
from django.contrib.gis.db import models as gismodels
class Location(gismodels.Model):
point = gismodels.PointField()
def __str__(self):
return ""
| PostgresJSON |
python | mlflow__mlflow | mlflow/gateway/config.py | {
"start": 15303,
"end": 15819
} | class ____(ConfigModel):
name: str
route_type: str
model: EndpointModelInfo
route_url: str
limit: Limit | None = None
model_config = ConfigDict(json_schema_extra=_ROUTE_EXTRA_SCHEMA)
def to_endpoint(self):
from mlflow.deployments.server.config import Endpoint
return Endpoint(
name=self.name,
endpoint_type=self.route_type,
model=self.model,
endpoint_url=self.route_url,
limit=self.limit,
)
| _LegacyRoute |
python | PrefectHQ__prefect | src/prefect/server/events/filters.py | {
"start": 1002,
"end": 1469
} | class ____(PrefectFilterBaseModel):
"""Filter by `Automation.created`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include automations created before this datetime",
)
def _get_filter_list(
self, db: PrefectDBInterface
) -> Iterable[sa.ColumnElement[bool]]:
if self.before_ is not None:
return [db.Automation.created <= self.before_]
return ()
| AutomationFilterCreated |
python | pydata__xarray | xarray/core/indexes.py | {
"start": 36641,
"end": 54523
} | class ____(PandasIndex):
"""Wrap a pandas.MultiIndex as an xarray compatible index."""
index: pd.MultiIndex
dim: Hashable
coord_dtype: Any
level_coords_dtype: dict[Hashable | None, Any]
__slots__ = ("coord_dtype", "dim", "index", "level_coords_dtype")
def __init__(self, array: Any, dim: Hashable, level_coords_dtype: Any = None):
super().__init__(array, dim)
# default index level names
names = []
for i, idx in enumerate(self.index.levels):
name = idx.name or f"{dim}_level_{i}"
if name == dim:
raise ValueError(
f"conflicting multi-index level name {name!r} with dimension {dim!r}"
)
names.append(name)
self.index.names = names
if level_coords_dtype is None:
level_coords_dtype = {
idx.name: get_valid_numpy_dtype(idx) for idx in self.index.levels
}
self.level_coords_dtype = level_coords_dtype
def _replace(self, index, dim=None, level_coords_dtype=None) -> PandasMultiIndex:
if dim is None:
dim = self.dim
index.name = dim
if level_coords_dtype is None:
level_coords_dtype = self.level_coords_dtype
return type(self)(index, dim, level_coords_dtype)
@classmethod
def from_variables(
cls,
variables: Mapping[Any, Variable],
*,
options: Mapping[str, Any],
) -> PandasMultiIndex:
_check_dim_compat(variables)
dim = next(iter(variables.values())).dims[0]
index = pd.MultiIndex.from_arrays(
[var.values for var in variables.values()], names=list(variables.keys())
)
index.name = dim
level_coords_dtype = {name: var.dtype for name, var in variables.items()}
obj = cls(index, dim, level_coords_dtype=level_coords_dtype)
return obj
@classmethod
def concat(
cls,
indexes: Sequence[Self],
dim: Hashable,
positions: Iterable[Iterable[int]] | None = None,
) -> Self:
new_pd_index = cls._concat_indexes(indexes, dim, positions)
if not indexes:
level_coords_dtype = None
else:
level_coords_dtype = {}
for name in indexes[0].level_coords_dtype:
level_coords_dtype[name] = np.result_type(
*[idx.level_coords_dtype[name] for idx in indexes]
)
return cls(new_pd_index, dim=dim, level_coords_dtype=level_coords_dtype)
@classmethod
def stack(
cls, variables: Mapping[Any, Variable], dim: Hashable
) -> PandasMultiIndex:
"""Create a new Pandas MultiIndex from the product of 1-d variables (levels) along a
new dimension.
Level variables must have a dimension distinct from each other.
Keeps levels the same (doesn't refactorize them) so that it gives back the original
labels after a stack/unstack roundtrip.
"""
_check_dim_compat(variables, all_dims="different")
level_indexes = [safe_cast_to_index(var) for var in variables.values()]
for name, idx in zip(variables, level_indexes, strict=True):
if isinstance(idx, pd.MultiIndex):
raise ValueError(
f"cannot create a multi-index along stacked dimension {dim!r} "
f"from variable {name!r} that wraps a multi-index"
)
# from_product sorts by default, so we can't use that always
# https://github.com/pydata/xarray/issues/980
# https://github.com/pandas-dev/pandas/issues/14672
if all(index.is_monotonic_increasing for index in level_indexes):
index = pd.MultiIndex.from_product(
level_indexes, sortorder=0, names=list(variables.keys())
)
else:
split_labels, levels = zip(
*[lev.factorize() for lev in level_indexes], strict=True
)
labels_mesh = np.meshgrid(*split_labels, indexing="ij")
labels = [x.ravel().tolist() for x in labels_mesh]
index = pd.MultiIndex(
levels=levels, codes=labels, sortorder=0, names=list(variables.keys())
)
level_coords_dtype = {k: var.dtype for k, var in variables.items()}
return cls(index, dim, level_coords_dtype=level_coords_dtype)
def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]:
clean_index = remove_unused_levels_categories(self.index)
if not clean_index.is_unique:
raise ValueError(
"Cannot unstack MultiIndex containing duplicates. Make sure entries "
f"are unique, e.g., by calling ``.drop_duplicates('{self.dim}')``, "
"before unstacking."
)
new_indexes: dict[Hashable, Index] = {}
for name, lev in zip(clean_index.names, clean_index.levels, strict=True):
idx = PandasIndex(
lev.copy(), name, coord_dtype=self.level_coords_dtype[name]
)
new_indexes[name] = idx
return new_indexes, clean_index
@classmethod
def from_variables_maybe_expand(
cls,
dim: Hashable,
current_variables: Mapping[Any, Variable],
variables: Mapping[Any, Variable],
) -> tuple[PandasMultiIndex, IndexVars]:
"""Create a new multi-index maybe by expanding an existing one with
new variables as index levels.
The index and its corresponding coordinates may be created along a new dimension.
"""
names: list[Hashable] = []
codes: list[Iterable[int]] = []
levels: list[Iterable[Any]] = []
level_variables: dict[Any, Variable] = {}
_check_dim_compat({**current_variables, **variables})
if len(current_variables) > 1:
# expand from an existing multi-index
data = cast(
PandasMultiIndexingAdapter, next(iter(current_variables.values()))._data
)
current_index = data.array
names.extend(current_index.names)
codes.extend(current_index.codes)
levels.extend(current_index.levels)
for name in current_index.names:
level_variables[name] = current_variables[name]
elif len(current_variables) == 1:
# expand from one 1D variable (no multi-index): convert it to an index level
var = next(iter(current_variables.values()))
new_var_name = f"{dim}_level_0"
names.append(new_var_name)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
level_variables[new_var_name] = var
for name, var in variables.items():
names.append(name)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
level_variables[name] = var
codes_as_lists = [list(x) for x in codes]
levels_as_lists = [list(level) for level in levels]
index = pd.MultiIndex(levels=levels_as_lists, codes=codes_as_lists, names=names)
level_coords_dtype = {k: var.dtype for k, var in level_variables.items()}
obj = cls(index, dim, level_coords_dtype=level_coords_dtype)
index_vars = obj.create_variables(level_variables)
return obj, index_vars
def keep_levels(
self, level_variables: Mapping[Any, Variable]
) -> PandasMultiIndex | PandasIndex:
"""Keep only the provided levels and return a new multi-index with its
corresponding coordinates.
"""
index = self.index.droplevel(
[k for k in self.index.names if k not in level_variables]
)
if isinstance(index, pd.MultiIndex):
level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names}
return self._replace(index, level_coords_dtype=level_coords_dtype)
else:
# backward compatibility: rename the level coordinate to the dimension name
return PandasIndex(
index.rename(self.dim),
self.dim,
coord_dtype=self.level_coords_dtype[index.name],
)
def reorder_levels(
self, level_variables: Mapping[Any, Variable]
) -> PandasMultiIndex:
"""Re-arrange index levels using input order and return a new multi-index with
its corresponding coordinates.
"""
index = cast(pd.MultiIndex, self.index.reorder_levels(level_variables.keys()))
level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names}
return self._replace(index, level_coords_dtype=level_coords_dtype)
def create_variables(
self, variables: Mapping[Any, Variable] | None = None
) -> IndexVars:
from xarray.core.variable import IndexVariable
if variables is None:
variables = {}
index_vars: IndexVars = {}
for name in (self.dim,) + tuple(self.index.names):
if name == self.dim:
level = None
dtype = None
else:
level = name
dtype = self.level_coords_dtype[name]
var = variables.get(name)
if var is not None:
attrs = var.attrs
encoding = var.encoding
else:
attrs = {}
encoding = {}
data = PandasMultiIndexingAdapter(self.index, dtype=dtype, level=level) # type: ignore[arg-type] # TODO: are Hashables ok?
index_vars[name] = IndexVariable(
self.dim,
data,
attrs=attrs,
encoding=encoding,
fastpath=True,
)
return index_vars
def sel(self, labels, method=None, tolerance=None) -> IndexSelResult:
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
if method is not None or tolerance is not None:
raise ValueError(
"multi-index does not support ``method`` and ``tolerance``"
)
new_index = None
scalar_coord_values = {}
indexer: int | slice | np.ndarray | Variable | DataArray
# label(s) given for multi-index level(s)
if all(lbl in self.index.names for lbl in labels):
label_values = {}
for k, v in labels.items():
label_array = normalize_label(v, dtype=self.level_coords_dtype[k])
try:
label_values[k] = as_scalar(label_array)
except ValueError as err:
# label should be an item not an array-like
raise ValueError(
"Vectorized selection is not "
f"available along coordinate {k!r} (multi-index level)"
) from err
has_slice = any(isinstance(v, slice) for v in label_values.values())
if len(label_values) == self.index.nlevels and not has_slice:
indexer = self.index.get_loc(
tuple(label_values[k] for k in self.index.names)
)
else:
indexer, new_index = self.index.get_loc_level(
tuple(label_values.values()), level=tuple(label_values.keys())
)
scalar_coord_values.update(label_values)
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == "b" and indexer.sum() == 0: # type: ignore[union-attr]
raise KeyError(f"{labels} not found")
# assume one label value given for the multi-index "array" (dimension)
else:
if len(labels) > 1:
coord_name = next(iter(set(labels) - set(self.index.names)))
raise ValueError(
f"cannot provide labels for both coordinate {coord_name!r} (multi-index array) "
f"and one or more coordinates among {self.index.names!r} (multi-index levels)"
)
coord_name, label = next(iter(labels.items()))
if is_dict_like(label):
invalid_levels = tuple(
name for name in label if name not in self.index.names
)
if invalid_levels:
raise ValueError(
f"multi-index level names {invalid_levels} not found in indexes {tuple(self.index.names)}"
)
return self.sel(label)
elif isinstance(label, slice):
indexer = _query_slice(self.index, label, coord_name)
elif isinstance(label, tuple):
if _is_nested_tuple(label):
indexer = self.index.get_locs(label)
elif len(label) == self.index.nlevels:
indexer = self.index.get_loc(label)
else:
levels = [self.index.names[i] for i in range(len(label))]
indexer, new_index = self.index.get_loc_level(label, level=levels)
scalar_coord_values.update(dict(zip(levels, label, strict=True)))
else:
label_array = normalize_label(label)
if label_array.ndim == 0:
label_value = as_scalar(label_array)
indexer, new_index = self.index.get_loc_level(label_value, level=0)
scalar_coord_values[self.index.names[0]] = label_value
elif label_array.dtype.kind == "b":
indexer = label_array
else:
if label_array.ndim > 1:
raise ValueError(
"Vectorized selection is not available along "
f"coordinate {coord_name!r} with a multi-index"
)
indexer = get_indexer_nd(self.index, label_array)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {coord_name!r}")
# attach dimension names and/or coordinates to positional indexer
if isinstance(label, Variable):
indexer = Variable(label.dims, indexer)
elif isinstance(label, DataArray):
# do not include label-indexer DataArray coordinates that conflict
# with the level names of this index
coords = {
k: v
for k, v in label._coords.items()
if k not in self.index.names
}
indexer = DataArray(indexer, coords=coords, dims=label.dims)
if new_index is not None:
if isinstance(new_index, pd.MultiIndex):
level_coords_dtype = {
k: self.level_coords_dtype[k] for k in new_index.names
}
new_index = self._replace(
new_index, level_coords_dtype=level_coords_dtype
)
dims_dict = {}
drop_coords = []
else:
new_index = PandasIndex(
new_index,
new_index.name,
coord_dtype=self.level_coords_dtype[new_index.name],
)
dims_dict = {self.dim: new_index.index.name}
drop_coords = [self.dim]
# variable(s) attrs and encoding metadata are propagated
# when replacing the indexes in the resulting xarray object
new_vars = new_index.create_variables()
indexes = cast(dict[Any, Index], dict.fromkeys(new_vars, new_index))
# add scalar variable for each dropped level
variables = new_vars
for name, val in scalar_coord_values.items():
variables[name] = Variable([], val)
return IndexSelResult(
{self.dim: indexer},
indexes=indexes,
variables=variables,
drop_indexes=list(scalar_coord_values),
drop_coords=drop_coords,
rename_dims=dims_dict,
)
else:
return IndexSelResult({self.dim: indexer})
def join(self, other, how: str = "inner"):
if how == "outer":
# bug in pandas? need to reset index.name
other_index = other.index.copy()
other_index.name = None
index = self.index.union(other_index)
index.name = self.dim
else:
# how = "inner"
index = self.index.intersection(other.index)
level_coords_dtype = {
k: np.result_type(lvl_dtype, other.level_coords_dtype[k])
for k, lvl_dtype in self.level_coords_dtype.items()
}
return type(self)(index, self.dim, level_coords_dtype=level_coords_dtype)
def rename(self, name_dict, dims_dict):
if not set(self.index.names) & set(name_dict) and self.dim not in dims_dict:
return self
# pandas 1.3.0: could simply do `self.index.rename(names_dict)`
new_names = [name_dict.get(k, k) for k in self.index.names]
index = self.index.rename(new_names)
new_dim = dims_dict.get(self.dim, self.dim)
new_level_coords_dtype = dict(
zip(new_names, self.level_coords_dtype.values(), strict=True)
)
return self._replace(
index, dim=new_dim, level_coords_dtype=new_level_coords_dtype
)
| PandasMultiIndex |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0084_crons_dedupe_workflows.py | {
"start": 3835,
"end": 5199
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("workflow_engine", "0083_add_status_to_action"),
("monitors", "0009_backfill_monitor_detectors"),
]
operations = [
migrations.RunPython(
dedupe_cron_shadow_workflows,
migrations.RunPython.noop,
hints={"tables": ["sentry_rule"]},
),
]
| Migration |
python | doocs__leetcode | solution/0900-0999/0908.Smallest Range I/Solution.py | {
"start": 0,
"end": 155
} | class ____:
def smallestRangeI(self, nums: List[int], k: int) -> int:
mx, mi = max(nums), min(nums)
return max(0, mx - mi - k * 2)
| Solution |
python | getsentry__sentry | tests/sentry/integrations/github/test_ticket_action.py | {
"start": 770,
"end": 6719
} | class ____(RuleTestCase, BaseAPITestCase):
rule_cls = GitHubCreateTicketAction
repo = "foo/bar"
assignee = "sentry_user"
labels = ["bug", "invalid"]
issue_num = 1
def setUp(self) -> None:
super().setUp()
self.integration = self.create_integration(
organization=self.organization,
provider="github",
name="Github",
external_id="1",
metadata={
"verify_ssl": True,
},
)
self.installation = get_installation_of_type(
GitHubIntegration, self.integration, self.organization.id
)
self.login_as(user=self.user)
responses.add(
method=responses.POST,
url="https://api.github.com/app/installations/1/access_tokens",
body='{"token": "12345token", "expires_at": "2099-01-01T00:00:00Z"}',
status=200,
content_type="application/json",
)
@pytest.fixture(autouse=True)
def stub_get_jwt(self):
with patch.object(client, "get_jwt", return_value="jwt_token_1"):
yield
def trigger(self, event, rule_object):
action = rule_object.data.get("actions", ())[0]
action_inst = self.get_rule(data=action, rule=rule_object)
results = list(action_inst.after(event=event))
assert len(results) == 1
rule_future = RuleFuture(rule=rule_object, kwargs=results[0].kwargs)
return results[0].callback(event, futures=[rule_future])
def get_key(self, event: GroupEvent):
return ExternalIssue.objects.get_linked_issues(event, self.integration).values_list(
"key", flat=True
)[0]
@responses.activate()
def test_ticket_rules(self) -> None:
title = "sample title"
sample_description = "sample bug report"
html_url = f"https://github.com/foo/bar/issues/{self.issue_num}"
responses.add(
method=responses.POST,
url="https://api.github.com/repos/foo/bar/issues",
json={
"number": self.issue_num,
"title": title,
"body": sample_description,
"html_url": html_url,
},
status=200,
)
responses.add(
method=responses.GET,
url=f"https://api.github.com/repos/foo/bar/issues/{self.issue_num}",
json={
"number": "1",
"title": title,
"body": sample_description,
"html_url": html_url,
},
status=200,
)
# Create a new Rule
response = self.client.post(
reverse(
"sentry-api-0-project-rules",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
},
),
format="json",
data={
"name": "hello world",
"owner": self.user.id,
"environment": None,
"actionMatch": "any",
"frequency": 5,
"actions": [
{
"id": "sentry.integrations.github.notify_action.GitHubCreateTicketAction",
"integration": self.integration.id,
"dynamic_form_fields": [{"random": "garbage"}],
"repo": self.repo,
"assignee": self.assignee,
"labels": self.labels,
}
],
"conditions": [],
},
)
assert response.status_code == 200
# Get the rule from DB
rule_object = Rule.objects.get(id=response.data["id"])
event = self.get_group_event()
# Trigger its `after`
self.trigger(event, rule_object)
# assert ticket created in DB
key = self.get_key(event)
assert key == f"{self.repo}#{self.issue_num}"
external_issue_count = len(ExternalIssue.objects.filter(key=key))
assert external_issue_count == 1
# assert ticket created in GitHub
data = self.installation.get_issue(
key, data={"repo": self.repo, "externalIssue": self.issue_num}
)
assert sample_description in data["description"]
# Trigger its `after` _again_
self.trigger(event, rule_object)
# assert new ticket NOT created in DB
assert ExternalIssue.objects.count() == external_issue_count
@responses.activate()
def test_fails_validation(self) -> None:
"""
Test that the absence of dynamic_form_fields in the action fails validation
"""
# Create a new Rule
response = self.client.post(
reverse(
"sentry-api-0-project-rules",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
},
),
format="json",
data={
"name": "hello world",
"owner": self.user.id,
"environment": None,
"actionMatch": "any",
"frequency": 5,
"actions": [
{
"id": "sentry.integrations.github.notify_action.GitHubCreateTicketAction",
"integration": self.integration.id,
"repo": self.repo,
"assignee": self.assignee,
"labels": self.labels,
}
],
"conditions": [],
},
)
assert response.status_code == 400
assert response.data["actions"][0] == "Must configure issue link settings."
| GitHubTicketRulesTestCase |
python | pypa__virtualenv | src/virtualenv/util/path/_sync.py | {
"start": 1778,
"end": 2116
} | class ____:
def __init__(self, src, dest) -> None:
self.src = src
self.dest = dest
def __str__(self) -> str:
return f"{'directory ' if self.src.is_dir() else ''}{self.src!s} to {self.dest!s}"
__all__ = [
"copy",
"copytree",
"ensure_dir",
"safe_delete",
"symlink",
"symlink",
]
| _Debug |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-panel-chatbot/llama_index/packs/panel_chatbot/app.py | {
"start": 2467,
"end": 12024
} | class ____(pn.viewable.Viewer):
"""
The IndexLoader enables the user to interactively create a VectorStoreIndex from a
github repository of choice.
"""
value: VectorStoreIndex = param.ClassSelector(class_=VectorStoreIndex)
status = param.String(constant=True, doc="A status message")
owner: str = param.String(
default="holoviz", doc="The repository owner. For example 'holoviz'"
)
repo: str = param.String(
default="panel", doc="The repository name. For example 'panel'"
)
filter_directories: str = param.String(
default="examples,docs,panel",
label="Folders",
doc="A comma separated list of folders to include. For example 'examples,docs,panel'",
)
filter_file_extensions: str = param.String(
default=".py,.md,.ipynb",
label="File Extensions",
doc="A comma separated list of file extensions to include. For example '.py,.md,.ipynb'",
)
_load = param.Event(
label="LOAD",
doc="Loads the repository index from the cache if it exists and otherwise from scratch",
)
_reload = param.Event(
default=False,
label="RELOAD ALL",
doc="Loads the repository index from scratch",
)
def __init__(self) -> None:
super().__init__()
if self.index_exists:
pn.state.execute(self.load)
else:
self._update_status(INDEX_NOT_LOADED)
self._layout = pn.Column(
self.param.owner,
self.param.repo,
self.param.filter_directories,
self.param.filter_file_extensions,
pn.pane.HTML(self.github_url),
pn.widgets.Button.from_param(
self.param._load,
button_type="primary",
disabled=self._is_loading,
loading=self._is_loading,
),
pn.widgets.Button.from_param(
self.param._reload,
button_type="primary",
button_style="outline",
disabled=self._is_loading,
loading=self._is_loading,
),
pn.pane.Markdown("### Status", margin=(3, 5)),
pn.pane.Str(self.param.status),
)
def __panel__(self) -> pn.Column:
return self._layout
@property
def _unique_id(self):
uid = (
self.owner
+ self.repo
+ self.filter_directories
+ self.filter_file_extensions
)
return uid.replace(",", "").replace(".", "")
@property
def _cached_docs_path(self):
return CACHE_PATH / f"docs_{self._unique_id}.pickle"
@property
def _cached_index_path(self):
return CACHE_PATH / f"index_{self._unique_id}.pickle"
async def _download_docs(self):
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
filter_directories = _split_and_clean(self.filter_directories)
filter_file_extensions = _split_and_clean(self.filter_file_extensions)
loader = GithubRepositoryReader(
github_client,
owner=self.owner,
repo=self.repo,
filter_directories=(
filter_directories,
GithubRepositoryReader.FilterType.INCLUDE,
),
filter_file_extensions=(
filter_file_extensions,
GithubRepositoryReader.FilterType.INCLUDE,
),
verbose=True,
concurrent_requests=10,
)
return loader.load_data(branch="main")
async def _get_docs(self):
docs_path = self._cached_docs_path
index_path = self._cached_index_path
if docs_path.exists():
self._update_status(LOADING_EXISTING_DOCS)
with docs_path.open("rb") as f:
return pickle.load(f)
self._update_status(LOADING_NEW_DOCS)
docs = await self._download_docs()
with docs_path.open("wb") as f:
pickle.dump(docs, f, pickle.HIGHEST_PROTOCOL)
if index_path.exists():
index_path.unlink()
return docs
async def _create_index(self, docs):
return VectorStoreIndex.from_documents(docs, use_async=True)
async def _get_index(self, index):
index_path = self._cached_index_path
if index_path.exists():
self._update_status(LOADING_EXISTING_INDEX)
with index_path.open("rb") as f:
return pickle.load(f)
self._update_status(LOADING_NEW_INDEX)
index = await self._create_index(index)
with index_path.open("wb") as f:
pickle.dump(index, f, pickle.HIGHEST_PROTOCOL)
return index
@param.depends("status")
def _is_loading(self):
return self.status not in [INDEX_LOADED, INDEX_NOT_LOADED]
@param.depends("status")
def _is_not_loading(self):
return self.status in [INDEX_LOADED, INDEX_NOT_LOADED]
@param.depends("_load", watch=True)
async def load(self):
"""
Loads the repository index either from the cache or by downloading from
the repository.
"""
self._update_status("Loading ...")
self.value = None
docs = await self._get_docs()
self.value = await self._get_index(docs)
self._update_status(INDEX_LOADED)
@param.depends("_reload", watch=True)
async def reload(self):
self._update_status("Deleting cached index ...")
if self._cached_docs_path.exists():
self._cached_docs_path.unlink()
if self._cached_index_path.exists():
self._cached_index_path.unlink()
await self.load()
def _update_status(self, text):
with param.edit_constant(self):
self.status = text
print(text)
@param.depends("owner", "repo")
def github_url(self):
"""Returns a html string with a link to the github repository."""
text = f"{self.owner}/{self.repo}"
href = f"https://github.com/{text}"
return f"<a href='{href}' target='_blank'>{text}</a>"
@property
def index_exists(self):
"""Returns True if the index already exists."""
return self._cached_docs_path.exists() and self._cached_index_path.exists()
def powered_by():
"""Returns a component describing the frameworks powering the chat ui."""
params = {"height": 40, "sizing_mode": "fixed", "margin": (0, 10)}
return pn.Column(
pn.pane.Markdown("### AI Powered By", margin=(10, 5, 10, 0)),
pn.Row(
pn.pane.Image(LLAMA_INDEX_LOGO, link_url=LLAMA_INDEX_URL, **params),
pn.pane.Image(CHAT_GPT_LOGO, link_url=CHAT_GPT_URL, **params),
pn.pane.Image(PANEL_LOGO[pn.config.theme], link_url=PANEL_URL, **params),
align="center",
),
)
async def chat_component(index: VectorStoreIndex, index_loader: IndexLoader):
"""Returns the chat component powering the main area of the application."""
if not index:
return pn.Column(
pn.chat.ChatMessage(
"You are a now a *GitHub Repository assistant*.",
user="System",
),
pn.chat.ChatMessage(
"Please **load a GitHub Repository** to start chatting with me. This can take from seconds to minutes!",
user="Assistant",
),
)
chat_engine = index.as_chat_engine(chat_mode="context", verbose=True)
async def generate_response(contents, user, instance):
response = await chat_engine.astream_chat(contents)
text = ""
async for token in response.async_response_gen():
text += token
yield text
chat_interface = pn.chat.ChatInterface(
callback=generate_response,
sizing_mode="stretch_both",
)
chat_interface.send(
pn.chat.ChatMessage(
"You are a now a *GitHub Repository Assistant*.", user="System"
),
respond=False,
)
chat_interface.send(
pn.chat.ChatMessage(
f"Hello! you can ask me anything about {index_loader.github_url()}.",
user="Assistant",
),
respond=False,
)
return chat_interface
def settings_components(index_loader: IndexLoader):
"""Returns a list of the components to add to the sidebar."""
return [
pn.pane.Image(
CUTE_LLAMA,
height=250,
align="center",
margin=(10, 5, 25, 5),
link_url=CUTE_LLAMA_URL,
),
"## Github Repository",
index_loader,
powered_by(),
]
def create_chat_ui():
"""Returns the Chat UI."""
pn.extension(
sizing_mode="stretch_width", raw_css=[CSS_FIXES_TO_BE_UPSTREAMED_TO_PANEL]
)
index_loader = IndexLoader()
pn.state.location.sync(
index_loader,
parameters={
"owner": "owner",
"repo": "repo",
"filter_directories": "folders",
"filter_file_extensions": "file_extensions",
},
)
bound_chat_interface = pn.bind(
chat_component, index=index_loader.param.value, index_loader=index_loader
)
return pn.template.FastListTemplate(
title="Chat with GitHub",
sidebar=settings_components(index_loader),
main=[bound_chat_interface],
accent=ACCENT,
main_max_width="1000px",
main_layout=None,
)
if pn.state.served:
create_chat_ui().servable()
| IndexLoader |
python | PyCQA__pylint | tests/functional/u/unused/unused_import_class_def_keyword.py | {
"start": 476,
"end": 558
} | class ____(Child, domain=DOMAIN_2):
DOMAIN_2 = DOMAIN_2
# Alternative 2
| Parent_2 |
python | Netflix__metaflow | test/core/tests/basic_config_parameters.py | {
"start": 72,
"end": 4548
} | class ____(MetaflowTest):
PRIORITY = 1
REQUIRED_FILES = ["basic_config_silly.txt"]
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
PARAMETERS = {
"default_from_config": {
"default": "config_expr('config2').default_param",
"type": "int",
},
"default_from_func": {"default": "param_default", "type": "int"},
}
CONFIGS = {
# Test a default value as a dict
"config": {"default_value": "default_config"},
# Test parser, various arguments and overriden default
"silly_config": {
"required": True,
"parser": "silly_parser",
"default": "'silly.txt'",
},
"config2": {},
# Test using a function to get the value
"config3": {"default_value": "config_default"},
# Test ** notation
"config_env": {},
}
HEADER = """
import json
import os
# Test passing values directly on the command line
os.environ['METAFLOW_FLOW_CONFIG_VALUE'] = json.dumps(
{
"config2": {"default_param": 123},
"config_env": {"vars": {"var1": "value1", "var2": "value2"}}
}
)
# Test overriding a file (the default one)
os.environ['METAFLOW_FLOW_CONFIG'] = json.dumps(
{
"silly_config": "basic_config_silly.txt"
}
)
def silly_parser(s):
k, v = s.split(":")
return {k: v.strip()}
default_config = {
"value": 42,
"str_value": "foobar",
"project_name": "test_config",
"nested": {"value": 43},
}
def param_default(ctx):
return ctx.configs.config2.default_param + 1
def config_default(ctx):
return {"val": 456}
# Test flow-level decorator configurations
@project(name=config_expr("config").project_name)
"""
# Test step level decorators with configs
@tag(
"environment(vars={'normal': config.str_value, 'stringify': config_expr('str(config.value)')})"
)
@steps(0, ["all"])
def step_all(self):
# Test flow-level decorator configs
assert_equals(current.project_name, "test_config")
# Test step-level decorator configs
assert_equals(os.environ["normal"], "foobar")
assert_equals(os.environ["stringify"], "42")
# Test parameters reading configs
assert_equals(self.default_from_config, 123)
assert_equals(self.default_from_func, 124)
# Test configs are accessible as artifacts
assert_equals(self.config.value, 42)
assert_equals(self.config["value"], 42)
assert_equals(self.config.nested.value, 43)
assert_equals(self.config["nested"]["value"], 43)
assert_equals(self.config.nested["value"], 43)
assert_equals(self.config["nested"].value, 43)
# Test parser
assert_equals(self.silly_config.baz, "amazing")
assert_equals(self.silly_config["baz"], "amazing")
assert_equals(self.config3.val, 456)
try:
self.config3["val"] = 5
raise ExpectationFailed(TypeError, "configs should be immutable")
except TypeError:
pass
try:
self.config3.val = 5
raise ExpectationFailed(TypeError, "configs should be immutable")
except TypeError:
pass
@tag("environment(**config_env)")
@steps(0, ["start"])
def step_start(self):
# Here we check the environment based on the ** notation
assert_equals(os.environ["var1"], "value1")
assert_equals(os.environ["var2"], "value2")
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(
step.name,
"config",
{
"value": 42,
"str_value": "foobar",
"project_name": "test_config",
"nested": {"value": 43},
},
)
checker.assert_artifact(step.name, "config2", {"default_param": 123})
checker.assert_artifact(step.name, "config3", {"val": 456})
checker.assert_artifact(step.name, "silly_config", {"baz": "amazing"})
checker.assert_artifact(
step.name, "config_env", {"vars": {"var1": "value1", "var2": "value2"}}
)
| BasicConfigTest |
python | getsentry__sentry | src/sentry/spans/consumers/process/factory.py | {
"start": 1327,
"end": 9704
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
"""
1. Process spans and push them to redis
2. Commit offsets for processed spans
3. Reduce the messages to find the latest timestamp to process
4. Fetch all segments are two minutes or older and expire the keys so they
aren't reprocessed
5. Produce segments to buffered-segments topic
"""
def __init__(
self,
max_batch_size: int,
max_batch_time: int,
num_processes: int,
input_block_size: int | None,
output_block_size: int | None,
flusher_processes: int | None = None,
produce_to_pipe: Callable[[KafkaPayload], None] | None = None,
kafka_slice_id: int | None = None,
):
super().__init__()
self.rebalancing_count = 0
# config
self.max_batch_size = max_batch_size
self.max_batch_time = max_batch_time
self.input_block_size = input_block_size
self.output_block_size = output_block_size
self.num_processes = num_processes
self.flusher_processes = flusher_processes
self.produce_to_pipe = produce_to_pipe
self.kafka_slice_id = kafka_slice_id
if self.num_processes != 1:
self.__pool = MultiprocessingPool(num_processes)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
# TODO: remove once span buffer is live in all regions
scope = sentry_sdk.get_isolation_scope()
scope.level = "warning"
self.rebalancing_count += 1
sentry_sdk.set_tag("sentry_spans_rebalancing_count", str(self.rebalancing_count))
sentry_sdk.set_tag("sentry_spans_buffer_component", "consumer")
committer = CommitOffsets(commit)
buffer = SpansBuffer(
assigned_shards=[p.index for p in partitions],
slice_id=self.kafka_slice_id,
)
# patch onto self just for testing
flusher: ProcessingStrategy[FilteredPayload | int]
flusher = self._flusher = SpanFlusher(
buffer,
next_step=committer,
max_processes=self.flusher_processes,
produce_to_pipe=self.produce_to_pipe,
)
# The flusher must be given some time to shut down, because otherwise
# we may double-produce segments.
flusher = SetJoinTimeout(None, flusher)
if self.num_processes != 1:
run_task = run_task_with_multiprocessing(
function=partial(
process_batch,
buffer,
),
next_step=flusher,
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
pool=self.__pool,
input_block_size=self.input_block_size,
output_block_size=self.output_block_size,
)
else:
run_task = RunTask(
function=partial(
process_batch,
buffer,
),
next_step=flusher,
)
batch = BatchStep(
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
next_step=run_task,
)
def prepare_message(message: Message[KafkaPayload]) -> tuple[int, KafkaPayload]:
# We use the produce timestamp to drive the clock for flushing, so that
# consumer backlogs do not cause segments to be flushed prematurely.
# The received timestamp in the span is too old for this purpose if
# Relay starts buffering, and we don't want that effect to propagate
# into this system.
return (
int(message.timestamp.timestamp() if message.timestamp else time.time()),
message.payload,
)
add_timestamp = RunTask(
function=prepare_message,
next_step=batch,
)
# Our entire insertion process into redis is perfectly idempotent. It
# makes no sense to spend time inserting into redis during rebalancing
# when we can just parse and batch again.
return SetJoinTimeout(0.0, add_timestamp)
def shutdown(self) -> None:
if self.num_processes != 1:
self.__pool.close()
@metrics.wraps("spans.buffer.process_batch")
def process_batch(
buffer: SpansBuffer,
values: Message[ValuesBatch[tuple[int, KafkaPayload]]],
) -> int:
killswitch_config = killswitches.get_killswitch_value("spans.drop-in-buffer")
min_timestamp = None
decode_time = 0.0
spans = []
for value in values.payload:
assert isinstance(value, BrokerValue)
try:
timestamp, payload = value.payload
if min_timestamp is None or timestamp < min_timestamp:
min_timestamp = timestamp
decode_start = time.monotonic()
val = cast(SpanEvent, orjson.loads(payload.value))
decode_time += time.monotonic() - decode_start
if killswitches.value_matches(
"spans.drop-in-buffer",
killswitch_config,
{
"org_id": val.get("organization_id"),
"project_id": val.get("project_id"),
"trace_id": val.get("trace_id"),
"partition_id": value.partition.index,
},
emit_metrics=False,
):
continue
# Adding schema validation to avoid crashing the consumer downstream
segment_id = cast(str | None, attribute_value(val, "sentry.segment.id"))
validate_span_event(val, segment_id)
span = Span(
trace_id=val["trace_id"],
span_id=val["span_id"],
parent_span_id=val.get("parent_span_id"),
segment_id=segment_id,
project_id=val["project_id"],
payload=payload.value,
end_timestamp=cast(float, val["end_timestamp"]),
is_segment_span=bool(val.get("parent_span_id") is None or val.get("is_segment")),
)
spans.append(span)
except Exception:
logger.exception("spans.invalid-message")
# We only DLQ when parsing the input for now. All other errors
# beyond this point are very unlikely to pertain to a specific message:
#
# * if we get exceptions from buffer.process_spans, it's likely
# because Redis is down entirely.
# * if we get exceptions from the flusher, it's likely that there
# is a broader issue with traffic patterns where no individual
# message is at fault.
#
# in those situations it's better to halt the consumer as we're
# otherwise very likely to just DLQ everything anyway.
raise InvalidMessage(value.partition, value.offset)
# This timing is not tracked in case of an exception. This is desired
# because otherwise the ratio with other batch metrics is out of sync.
metrics.timing("spans.buffer.process_batch.decode", decode_time)
assert min_timestamp is not None
buffer.process_spans(spans, now=min_timestamp)
return min_timestamp
def validate_span_event(span_event: SpanEvent, segment_id: str | None) -> None:
"""
Checks whether the span is valid based on the ingest spans schema.
All spans that do not conform to the schema validation rules are discarded.
There are several other assertions to protect against downstream crashes, see also: INC-1453, INC-1458.
"""
if in_random_rollout("spans.process-segments.schema-validation"):
SPANS_CODEC.validate(span_event)
assert isinstance(span_event["trace_id"], str), "trace_id must be str"
assert isinstance(span_event["span_id"], str), "span_id must be str"
assert isinstance(span_event["start_timestamp"], (int, float)), "start_timestamp must be float"
assert isinstance(span_event["end_timestamp"], (int, float)), "end_timestamp must be float"
assert segment_id is None or isinstance(segment_id, str), "segment_id must be str or None"
| ProcessSpansStrategyFactory |
python | doocs__leetcode | solution/1500-1599/1557.Minimum Number of Vertices to Reach All Nodes/Solution.py | {
"start": 0,
"end": 197
} | class ____:
def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
cnt = Counter(t for _, t in edges)
return [i for i in range(n) if cnt[i] == 0]
| Solution |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 15158,
"end": 15457
} | class ____(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
"The length of the requested URL exceeds the capacity limit for"
" this server. The request cannot be processed."
)
| RequestURITooLarge |
python | apache__airflow | airflow-core/src/airflow/utils/deprecation_tools.py | {
"start": 913,
"end": 8226
} | class ____(FutureWarning):
"""
Warning class for deprecated imports in Airflow.
This warning is raised when users import deprecated classes or functions
from Airflow modules that have been moved to better locations.
"""
...
def getattr_with_deprecation(
imports: dict[str, str],
module: str,
override_deprecated_classes: dict[str, str],
extra_message: str,
name: str,
):
"""
Retrieve the imported attribute from the redirected module and raises a deprecation warning.
:param imports: dict of imports and their redirection for the module
:param module: name of the module in the package to get the attribute from
:param override_deprecated_classes: override target attributes with deprecated ones. If target attribute is
found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
:param name: attribute name
:return:
"""
target_class_full_name = imports.get(name)
# Handle wildcard pattern "*" - redirect all attributes to target module
# Skip Python special attributes (dunder attributes) as they shouldn't be redirected
if not target_class_full_name and "*" in imports and not (name.startswith("__") and name.endswith("__")):
target_class_full_name = f"{imports['*']}.{name}"
if not target_class_full_name:
raise AttributeError(f"The module `{module!r}` has no attribute `{name!r}`")
# Determine the warning class name (may be overridden)
warning_class_name = target_class_full_name
if override_deprecated_classes and name in override_deprecated_classes:
warning_class_name = override_deprecated_classes[name]
message = f"The `{module}.{name}` attribute is deprecated. Please use `{warning_class_name!r}`."
if extra_message:
message += f" {extra_message}."
warnings.warn(message, DeprecatedImportWarning, stacklevel=2)
# Import and return the target attribute
new_module, new_class_name = target_class_full_name.rsplit(".", 1)
try:
return getattr(importlib.import_module(new_module), new_class_name)
except ImportError as e:
error_message = (
f"Could not import `{new_module}.{new_class_name}` while trying to import `{module}.{name}`."
)
if extra_message:
error_message += f" {extra_message}."
raise ImportError(error_message) from e
def add_deprecated_classes(
module_imports: dict[str, dict[str, str]],
package: str,
override_deprecated_classes: dict[str, dict[str, str]] | None = None,
extra_message: str | None = None,
):
"""
Add deprecated attribute PEP-563 imports and warnings modules to the package.
Works for classes, functions, variables, and other module attributes.
Supports both creating virtual modules and modifying existing modules.
:param module_imports: imports to use. Format: dict[str, dict[str, str]]
- Keys are module names (creates virtual modules)
- Special key __name__ modifies the current module for direct attribute imports
- Can mix both approaches in a single call
:param package: package name (typically __name__)
:param override_deprecated_classes: override target attributes with deprecated ones.
Format: dict[str, dict[str, str]] matching the structure of module_imports
:param extra_message: extra message to display in the warning or import error message
Examples:
# Create virtual modules (e.g., for removed .py files)
add_deprecated_classes(
{"basenotifier": {"BaseNotifier": "airflow.sdk.bases.notifier.BaseNotifier"}},
package=__name__,
)
# Wildcard support - redirect all attributes to new module
add_deprecated_classes(
{"timezone": {"*": "airflow.sdk.timezone"}},
package=__name__,
)
# Current module direct imports
add_deprecated_classes(
{
__name__: {
"get_fs": "airflow.sdk.io.fs.get_fs",
"has_fs": "airflow.sdk.io.fs.has_fs",
}
},
package=__name__,
)
# Mixed behavior - both current module and submodule attributes
add_deprecated_classes(
{
__name__: {
"get_fs": "airflow.sdk.io.fs.get_fs",
"has_fs": "airflow.sdk.io.fs.has_fs",
"Properties": "airflow.sdk.io.typedef.Properties",
},
"typedef": {
"Properties": "airflow.sdk.io.typedef.Properties",
}
},
package=__name__,
)
The first example makes 'from airflow.notifications.basenotifier import BaseNotifier' work
even if 'basenotifier.py' was removed.
The second example makes 'from airflow.utils.timezone import utc' redirect to 'airflow.sdk.timezone.utc',
allowing any attribute from the deprecated module to be accessed from the new location.
The third example makes 'from airflow.io import get_fs' work with direct imports from the current module.
The fourth example handles both direct imports from the current module and submodule imports.
"""
# Handle both current module and virtual module deprecations
for module_name, imports in module_imports.items():
if module_name == package:
# Special case: modify the current module for direct attribute imports
if package not in sys.modules:
raise ValueError(f"Module {package} not found in sys.modules")
module = sys.modules[package]
# Create the __getattr__ function for current module
current_override = {}
if override_deprecated_classes and package in override_deprecated_classes:
current_override = override_deprecated_classes[package]
getattr_func = functools.partial(
getattr_with_deprecation,
imports,
package,
current_override,
extra_message or "",
)
# Set the __getattr__ function on the current module
setattr(module, "__getattr__", getattr_func)
else:
# Create virtual modules for submodule imports
full_module_name = f"{package}.{module_name}"
module_type = ModuleType(full_module_name)
if override_deprecated_classes and module_name in override_deprecated_classes:
override_deprecated_classes_for_module = override_deprecated_classes[module_name]
else:
override_deprecated_classes_for_module = {}
# Mypy is not able to derive the right function signature https://github.com/python/mypy/issues/2427
module_type.__getattr__ = functools.partial( # type: ignore[method-assign]
getattr_with_deprecation,
imports,
full_module_name,
override_deprecated_classes_for_module,
extra_message or "",
)
sys.modules.setdefault(full_module_name, module_type)
| DeprecatedImportWarning |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/migrations/0004_history_date_indexing.py | {
"start": 108,
"end": 704
} | class ____(migrations.Migration):
dependencies = [
(
"migration_test_app",
"0003_alter_historicalmodelwithcustomattrforeignkey_options_and_more",
),
]
operations = [
migrations.AlterField(
model_name="historicalmodelwithcustomattrforeignkey",
name="history_date",
field=models.DateTimeField(db_index=True),
),
migrations.AlterField(
model_name="historicalyar",
name="history_date",
field=models.DateTimeField(db_index=True),
),
]
| Migration |
python | huggingface__transformers | tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py | {
"start": 10190,
"end": 13105
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
)
@slow
def test_inference_image_classification_head(self):
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1001))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [0.2445, -1.1993, 0.1905],
("cuda", 8): [0.2445, -1.1993, 0.1905],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_semantic_segmentation(self):
model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
model = model.to(torch_device)
image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21, 65, 65))
self.assertEqual(logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
],
("cuda", 8): [
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3742], [-2.4226, -2.3028, -2.6836], [-2.7820, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9645, 4.8734]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
| MobileNetV2ModelIntegrationTest |
python | ApeWorX__ape | src/ape/logging.py | {
"start": 3676,
"end": 11046
} | class ____:
_mentioned_verbosity_option = False
_extra_loggers: dict[str, logging.Logger] = {}
DISABLE_LEVEL: int = 100_000
def __init__(
self,
_logger: logging.Logger,
fmt: str,
):
self.error = _logger.error
self.warning = _logger.warning
self.success = getattr(_logger, "success", _logger.info)
self.info = _logger.info
self.debug = _logger.debug
self._logger = _logger
self._did_parse_sys_argv = False
self._load_from_sys_argv()
self.fmt = fmt
@classmethod
def create(cls, fmt: Optional[str] = None) -> "ApeLogger":
fmt = fmt or DEFAULT_LOG_FORMAT
_logger = get_logger("ape", fmt=fmt)
return cls(_logger, fmt)
def format(self, fmt: Optional[str] = None):
self.fmt = fmt or DEFAULT_LOG_FORMAT
fmt = fmt or DEFAULT_LOG_FORMAT
_format_logger(self._logger, fmt)
def _load_from_sys_argv(self, default: Optional[Union[str, int, LogLevel]] = None):
"""
Load from sys.argv to beat race condition with `click`.
"""
if self._did_parse_sys_argv:
# Already parsed.
return
log_level = _get_level(level=default)
level_names = [lvl.name for lvl in LogLevel]
# Minus 2 because if `-v` is the last arg, it is not our verbosity `-v`.
num_args = len(sys.argv) - 2
for arg_i in range(1, 1 + num_args):
if sys.argv[arg_i] == "-v" or sys.argv[arg_i] == "--verbosity":
try:
level = _get_level(sys.argv[arg_i + 1].upper())
except Exception:
# Let it fail in a better spot, or is not our level.
continue
if level in level_names:
self._sys_argv = level
log_level = level
break
else:
# Not our level.
continue
self.set_level(log_level)
self._did_parse_sys_argv = True
@property
def level(self) -> int:
return self._logger.level
def set_level(self, level: Union[str, int, LogLevel]):
"""
Change the global ape logger log-level.
Args:
level (str): The name of the level or the value of the log-level.
"""
if level == self._logger.level:
return
elif isinstance(level, LogLevel):
level = level.value
elif isinstance(level, str) and level.lower().startswith("loglevel."):
# Seen in some environments.
level = level.split(".")[-1].strip()
self._logger.setLevel(level)
for _logger in self._extra_loggers.values():
_logger.setLevel(level)
@contextmanager
def at_level(self, level: Union[str, int, LogLevel]) -> Iterator:
"""
Change the log-level in a context.
Args:
level (Union[str, int, LogLevel]): The level to use.
Returns:
Iterator
"""
initial_level = self.level
self.set_level(level)
yield
self.set_level(initial_level)
def disable(self):
self.set_level(self.DISABLE_LEVEL)
@contextmanager
def disabled(self):
with self.at_level(self.DISABLE_LEVEL):
yield
def log_error(self, err: Exception):
"""
Avoids logging empty messages.
"""
message = str(err)
if message:
self._logger.error(message)
def warn_from_exception(self, err: Exception, message: str):
"""
Warn the user with the given message,
log the stack-trace of the error at the DEBUG level, and
mention how to enable DEBUG logging (only once).
"""
message = self._create_message_from_error(err, message)
self._logger.warning(message)
self.log_debug_stack_trace()
def error_from_exception(self, err: Exception, message: str):
"""
Log an error to the user with the given message,
log the stack-trace of the error at the DEBUG level, and
mention how to enable DEBUG logging (only once).
"""
message = self._create_message_from_error(err, message)
self._logger.error(message)
self.log_debug_stack_trace()
def _create_message_from_error(self, err: Exception, message: str):
err_type_name = getattr(type(err), "__name__", "Exception")
err_output = f"{err_type_name}: {err}"
message = f"{message}\n\t{err_output}"
if not self._mentioned_verbosity_option:
message += "\n\t(Use `--verbosity DEBUG` to see full stack-trace)"
self._mentioned_verbosity_option = True
return message
def log_debug_stack_trace(self):
stack_trace = traceback.format_exc()
self._logger.debug(stack_trace)
def create_logger(
self, new_name: str, handlers: Optional[Sequence[Callable[[str], str]]] = None
) -> logging.Logger:
_logger = get_logger(new_name, fmt=self.fmt, handlers=handlers)
_logger.setLevel(self.level)
self._extra_loggers[new_name] = _logger
return _logger
def _format_logger(
_logger: logging.Logger, fmt: str, handlers: Optional[Sequence[Callable[[str], str]]] = None
):
handler = ClickHandler(echo_kwargs=CLICK_ECHO_KWARGS, handlers=handlers)
formatter = ApeColorFormatter(fmt=fmt)
handler.setFormatter(formatter)
# Remove existing handler(s)
for existing_handler in _logger.handlers[:]:
if isinstance(existing_handler, ClickHandler):
_logger.removeHandler(existing_handler)
_logger.addHandler(handler)
def get_logger(
name: str,
fmt: Optional[str] = None,
handlers: Optional[Sequence[Callable[[str], str]]] = None,
) -> logging.Logger:
"""
Get a logger with the given ``name`` and configure it for usage with Ape.
Args:
name (str): The name of the logger.
fmt (Optional[str]): The format of the logger. Defaults to the Ape
logger's default format: ``"%(levelname)s%(plugin)s: %(message)s"``.
handlers (Optional[Sequence[Callable[[str], str]]]): Additional log message handlers.
Returns:
``logging.Logger``
"""
_logger = logging.getLogger(name)
_format_logger(_logger, fmt=fmt or DEFAULT_LOG_FORMAT, handlers=handlers)
return _logger
def _get_level(level: Optional[Union[str, int, LogLevel]] = None) -> str:
if level is None:
return DEFAULT_LOG_LEVEL
elif isinstance(level, LogLevel):
return level.name
elif isinstance(level, int) or (isinstance(level, str) and level.isnumeric()):
return LogLevel(int(level)).name
elif isinstance(level, str) and level.lower().startswith("loglevel."):
# Handle 'LogLevel.' prefix.
return level.split(".")[-1].strip()
return level
def sanitize_url(url: str) -> str:
"""Removes sensitive information from given URL"""
parsed = urlparse(url)
new_netloc = parsed.hostname or ""
if parsed.port:
new_netloc += f":{parsed.port}"
new_url = urlunparse(parsed._replace(netloc=new_netloc, path=""))
return f"{new_url}/{HIDDEN_MESSAGE}" if parsed.path else new_url
logger = ApeLogger.create()
| ApeLogger |
python | getsentry__sentry | src/sentry/services/filestore/s3.py | {
"start": 3882,
"end": 9060
} | class ____(File):
"""
The default file object used by the S3Boto3Storage backend.
This file implements file streaming using boto's multipart
uploading functionality. The file can be opened in read or
write mode.
This class extends Django's File class. However, the contained
data is only the data contained in the current buffer. So you
should not access the contained file object directly. You should
access the data via this class.
Warning: This file *must* be closed using the close() method in
order to properly write the file to S3. Be sure to close the file
in your application.
"""
# TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.
# TODO: When Django drops support for Python 2.5, rewrite to use the
# BufferedIO streams in the Python 2.6 io module.
buffer_size = 5242880
def __init__(self, name, mode, storage, buffer_size=None):
self._storage = storage
self.name = name[len(self._storage.location) :].lstrip("/")
self._mode = mode
self.obj = storage.bucket.Object(storage._encode_name(name))
# NOTE(mattrobenolt): This is an explicit deviation from
# django-storages. This adds an extra HEAD request before
# every GET. This effectively doubles the time it takes for
# every chunk in our filestore. We explicitly are opting
# out of this behavior to avoid this overhead.
#
# if 'w' not in mode:
# # Force early RAII-style exception if object does not exist
# self.obj.load()
self._is_dirty = False
self._file = None
self._multipart = None
# 5 MB is the minimum part size (if there is more than one part).
# Amazon allows up to 10,000 parts. The default supports uploads
# up to roughly 50 GB. Increase the part size to accommodate
# for files larger than this.
if buffer_size is not None:
self.buffer_size = buffer_size
self._write_counter = 0
@property
def size(self):
return self.obj.content_length
    @property
    def file(self):
        """Lazily materialize the object's body in an in-memory buffer.

        On first access in read mode, the whole S3 object body is downloaded
        into a BytesIO; if the storage gzips content and the object is
        gzip-encoded, the buffer is wrapped in a GzipFile so callers read
        decompressed data.
        """
        if self._file is None:
            with metrics.timer("filestore.read", instance="s3"):
                self._file = BytesIO()
                if "r" in self._mode:
                    self._is_dirty = False
                    # Single GET for the whole object body (no ranged reads).
                    self._file.write(self.obj.get()["Body"].read())
                    self._file.seek(0)
            if self._storage.gzip and self.obj.content_encoding == "gzip":
                # mtime=0.0 keeps the gzip stream deterministic.
                self._file = GzipFile(mode=self._mode, fileobj=self._file, mtime=0.0)
        return self._file
    @file.setter
    def file(self, value):
        # Allow callers (e.g. Django's File machinery) to swap the buffer.
        self._file = value
def read(self, *args, **kwargs):
if "r" not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super().read(*args, **kwargs)
    def write(self, content):
        """Buffer *content* for upload; only valid in write mode.

        Lazily initiates the S3 multipart upload on the first write, and
        flushes the buffer as a new part whenever it has reached
        ``buffer_size`` bytes.

        Raises:
            AttributeError: if the file was opened without "w" in the mode.
        """
        if "w" not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        if self._multipart is None:
            # Build the upload parameters from the storage's configuration.
            parameters = self._storage.object_parameters.copy()
            parameters["ACL"] = self._storage.default_acl
            parameters["ContentType"] = (
                mimetypes.guess_type(self.obj.key)[0] or self._storage.default_content_type
            )
            if self._storage.reduced_redundancy:
                parameters["StorageClass"] = "REDUCED_REDUNDANCY"
            if self._storage.encryption:
                parameters["ServerSideEncryption"] = "AES256"
            self._multipart = self.obj.initiate_multipart_upload(**parameters)
        # Flush the accumulated buffer as a part before appending more data.
        if self.buffer_size <= self._buffer_file_size:
            self._flush_write_buffer()
        return super().write(force_bytes(content))
@property
def _buffer_file_size(self):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
length = self.file.tell()
self.file.seek(pos)
return length
def _flush_write_buffer(self):
"""
Flushes the write buffer.
"""
if self._buffer_file_size:
self._write_counter += 1
self.file.seek(0)
assert self._multipart is not None
part = self._multipart.Part(self._write_counter)
part.upload(Body=self.file.read())
def close(self):
if self._is_dirty:
self._flush_write_buffer()
assert self._multipart is not None
# TODO: Possibly cache the part ids as they're being uploaded
# instead of requesting parts from server. For now, emulating
# s3boto's behavior.
parts = [
{"ETag": part.e_tag, "PartNumber": part.part_number}
for part in self._multipart.parts.all()
]
self._multipart.complete(MultipartUpload={"Parts": parts})
else:
if self._multipart is not None:
self._multipart.abort()
if self._file is not None:
self._file.close()
self._file = None
| S3Boto3StorageFile |
python | spyder-ide__spyder | spyder/plugins/run/tests/test_run.py | {
"start": 9133,
"end": 11736
} | class ____(RunExecutor):
sig_run_invocation = Signal(tuple)
    def __init__(self, parent, info, executor_name):
        """Build an example run executor from the declarative *info* spec.

        Each entry of *info* is a tuple ``(ext, context, prio, default_conf,
        req_cwd, handler, btn)`` describing one supported (extension,
        context) combination, its configuration widget defaults, and how the
        execution handler should be registered.
        """
        self.executor_configuration = []
        self.handlers = {}
        self.actions = {}
        self.executor_name = executor_name
        # Catch-all handler used regardless of extension/context.
        self.handlers['all'] = self.bind_execution_method('all')
        for ext, context, prio, default_conf, req_cwd, handler, btn in info:
            ConfWidget = ExampleRunExecutorConfFactory(default_conf)
            context_id = RunContext[context]
            self.executor_configuration.append({
                'input_extension': ext,
                'context': {
                    'name': context
                },
                'output_formats': [],
                'configuration_widget': ConfWidget,
                'requires_cwd': req_cwd,
                'priority': prio
            })
            # Register the handler at the requested granularity: per-context,
            # per-extension, or for the exact (extension, context) pair.
            if handler == 'context':
                self.handlers[context] = self.bind_execution_method(
                    f'context_{context_id}', context=context_id)
            elif handler == 'ext':
                self.handlers[ext] = self.bind_execution_method(
                    f'ext_{ext}', ext=ext)
            elif handler == 'both':
                self.handlers[(ext, context_id)] = self.bind_execution_method(
                    f'{ext}_{context_id}', ext=ext, context=context_id)
            if btn:
                # Placeholder; the button itself is created in
                # on_run_available once the Run plugin is up.
                self.actions[context_id] = None
        super().__init__(parent)
def bind_execution_method(self, handler_name, ext=None, context=None):
func = gen_executor_handler(
self.executor_name, handler_name, ext, context)
meth = MethodType(func, self)
setattr(self, f'exec_{handler_name}', meth)
return meth
def on_run_available(self, run):
run.register_executor_configuration(self, self.executor_configuration)
for context_id in list(self.actions):
act = run.create_run_in_executor_button(
context_id,
self.NAME,
text=f'Run {context_id} in {self.NAME}',
tip=None,
icon=None,
shortcut_context=None,
register_shortcut=False,
add_to_menu=False
)
self.actions[context_id] = act
def on_run_teardown(self, run):
run.deregister_executor_configuration(self, self.executor_configuration)
for context_id in list(self.actions):
run.destroy_run_in_executor_button(context_id, self.NAME)
self.actions.pop(context_id)
| ExampleRunExecutorWrapper |
python | pytorch__pytorch | test/export/test_passes.py | {
"start": 12684,
"end": 58777
} | class ____(TestCase):
    def setUp(self):
        """Build fresh per-test fixtures of pre-traced modules.

        Each helper returns a dict mapping a case name to a tuple of modules
        and example args (e.g. ``(mod_orig, mod, args)``); rebuilding them per
        test keeps graph mutations from leaking between tests.
        """
        super().setUp()
        self.MIXED_AUTOCAST_SET_GRAD_TESTS = _with_mixed_autocast_set_grad_tests()
        self.SEQUENTIAL_SPLIT_INLINE_TESTS = _sequential_split_inline_tests()
        self.SET_GRAD_ENABLED_TESTS = _set_grad_enabled_tests()
        self.WITH_AUTOCAST_TESTS = _with_autocast_tests()
        # Registers the TorchBind (_TorchScriptTesting) classes used below.
        init_torchbind_implementations()
    def tearDown(self):
        """Clear fixture dicts so traced modules are not retained across tests."""
        self.SEQUENTIAL_SPLIT_INLINE_TESTS.clear()
        self.SET_GRAD_ENABLED_TESTS.clear()
        self.WITH_AUTOCAST_TESTS.clear()
        self.MIXED_AUTOCAST_SET_GRAD_TESTS.clear()
        super().tearDown()
def _check_node_users_in_the_same_graph(self, gm):
for node in gm.graph.nodes:
for user in node.users:
self.assertTrue(user.graph is gm.graph)
    def test_runtime_assert_one_dim(self) -> None:
        """Exported program re-checks a single dynamic-dim bound at call time."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
            def forward(self, x):
                return x.cos()
        x = torch.zeros(2, 2, 3)
        # Dim 1 is dynamic within [2, 6]; the other dims stay specialized.
        dim1_x = torch.export.Dim("dim1_x", min=2, max=6)
        ep = torch.export.export(
            M(), (x,), dynamic_shapes={"x": {1: dim1_x}}, strict=True
        )
        with self.assertRaisesRegex(
            AssertionError,
            escape("Guard failed: x.size()[1] <= 6"),
        ):
            # expected <= 6, but got 7
            ep.module()(torch.zeros(2, 7, 3))
        # In-range input matches eager execution.
        self.assertEqual(
            ep.module()(torch.ones(2, 4, 3)), M().forward(torch.ones(2, 4, 3))
        )
def test_runtime_assert_multiple_dims(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return x.cos().sum() + y.sin().sum()
x = torch.zeros(4, 2, 3)
y = torch.zeros(5, 5, 5)
dim1_x = torch.export.Dim("dim1_x", min=2, max=6)
dim0_x, dim0_y = torch.export.dims("dim0_x", "dim0_y", min=3)
ep = torch.export.export(
M(),
(x, y),
dynamic_shapes={"x": {0: dim0_x, 1: dim1_x}, "y": {0: dim0_y}},
strict=True,
)
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: x.size()[1] <= 6"),
):
# expected <= 6, but got 7
ep.module()(torch.zeros(4, 7, 3), torch.ones(5, 5, 5))
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: y.size()[0] >= 3"),
):
# expected >= 3, but got 2
ep.module()(torch.zeros(4, 2, 3), torch.ones(2, 5, 5))
def test_runtime_assert_some_dims_not_specified(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return x.cos().sum() + y.sin().sum()
x = torch.zeros(4, 2, 3)
y = torch.zeros(5, 5, 5)
dim1_x = torch.export.Dim("dim1_x", min=2, max=6)
dim0_x = torch.export.Dim("dim0_x", min=3)
ep = torch.export.export(
M(),
(x, y),
dynamic_shapes={"x": {0: dim0_x, 1: dim1_x}, "y": None},
strict=True,
)
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: x.size()[1] <= 6"),
):
# expected <= 6, but got 7
ep.module()(torch.zeros(4, 7, 3), torch.ones(5, 5, 5))
# y is specialized to 5
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: y.size()[0] == 5"),
):
# expected 5, but got 2
ep.module()(torch.zeros(4, 2, 3), torch.ones(2, 5, 5))
# Since we didn't insert the constraint for x[1] >= 2, it should work for case where x[1] == 1
gm_result_for_1_size = ep.module()(torch.ones(3, 1, 3), torch.ones(5, 5, 5))
eager_result_for_1_size = M().forward(torch.ones(3, 1, 3), torch.ones(5, 5, 5))
self.assertEqual(gm_result_for_1_size, eager_result_for_1_size)
def test_runtime_assert_some_inps_not_used(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return y.cos().sum()
x = torch.zeros(4, 2, 3)
y = torch.zeros(5, 5, 5)
dim1_y = torch.export.Dim("dim1_y", min=3, max=6)
ep = torch.export.export(
M(), (x, y), dynamic_shapes={"x": None, "y": {1: dim1_y}}, strict=True
)
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: x.size()[1] == 2"),
):
# expected 2, but got 7
ep.module()(torch.zeros(4, 7, 3), torch.ones(5, 5, 5))
# y is specialized to 5
with self.assertRaisesRegex(
AssertionError,
escape("Guard failed: y.size()[0] == 5"),
):
# expected 5, but got 2
ep.module()(torch.zeros(4, 2, 3), torch.ones(2, 5, 5))
# Since we didn't insert the constraint for x[1] >= 2, it should work for case where x[1] == 1
gm_result_for_1_size = ep.module()(torch.zeros(4, 2, 3), torch.ones(5, 5, 5))
eager_result_for_1_size = M().forward(torch.zeros(4, 2, 3), torch.ones(5, 5, 5))
self.assertEqual(gm_result_for_1_size, eager_result_for_1_size)
    def test_view_to_view_copy(self) -> None:
        """ReplaceViewOpsWithViewCopyOpsPass removes aten.view nodes."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
            def forward(self, x):
                z = x.view(x.shape)
                return z.cos().sum()
        x = torch.zeros(4, 2, 3)
        ep = export(M(), (x,), strict=True)
        # The traced graph starts with exactly one aten.view call ...
        self.assertEqual(count_call_function(ep.graph, torch.ops.aten.view.default), 1)
        ep = ep._transform_do_not_use(ReplaceViewOpsWithViewCopyOpsPass())
        # ... and the pass rewrites it away.
        self.assertEqual(count_call_function(ep.graph, torch.ops.aten.view.default), 0)
def test_functionalization_with_view_copy(self) -> None:
class Module(torch.nn.Module):
def forward(self, x):
y = x + 4
y.add_(4)
z = y.view(y.shape)
return x.cos() + z.cos()
x = torch.zeros(4, 2, 3)
foo = Module()
ep = export(foo, (x,), strict=True)._transform_do_not_use(
ReplaceViewOpsWithViewCopyOpsPass()
)
# After this pass, there shouldn't be any view nodes in the graph
self.assertTrue(count_call_function(ep.graph, torch.ops.aten.view.default) == 0)
self.assertTrue(
count_call_function(ep.graph, torch.ops.aten.view_copy.default) > 0
)
    def test_views_op_having_view_copy(self) -> None:
        """Every core-tagged aten view op must have a view_copy counterpart."""
        schemas = torch._C._dispatch_get_registrations_for_dispatch_key("")
        # Strip the "aten::" prefix, leaving "name" or "name.overload".
        aten_schemas = [s[6:] for s in schemas if s.startswith("aten::")]
        for aten_schema in aten_schemas:
            val = aten_schema.split(".")
            assert len(val) <= 2
            name = ""
            overload = ""
            if len(val) == 1:
                # No overload suffix means the default overload.
                name = val[0]
                overload = "default"
            else:
                name, overload = val[0], val[1]
            op_overload = getattr(getattr(torch.ops.aten, name), overload)
            if torch.Tag.core in op_overload.tags and is_view_op(op_overload._schema):
                self.assertIsNotNone(get_view_copy_of_view_op(op_overload._schema))
def test_custom_obj_tuple_out(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
def forward(self, x):
a = torch.ops._TorchScriptTesting.takes_foo_tuple_return(self.attr, x)
y = a[0] + a[1]
b = torch.ops._TorchScriptTesting.takes_foo(self.attr, y)
return b
m = MyModule()
inputs = (torch.ones(2, 3),)
ep = torch.export.export(m, inputs, strict=False)
inp = torch.randn(2, 3)
orig_res = m(inp)
ep_res = ep.module()(inp)
without_token_ep = _remove_effect_tokens(ep)
without_token_ep.verifier().check(without_token_ep)
without_token_res = without_token_ep.module()(inp)
self.assertTrue(torch.allclose(orig_res, ep_res))
self.assertTrue(torch.allclose(orig_res, without_token_res))
def test_remove_effect_token_kwargs(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
def forward(self, x):
a = torch.ops._TorchScriptTesting.takes_foo_tuple_return(
foo=self.attr, x=x
)
y = a[0] + a[1]
b = torch.ops._TorchScriptTesting.takes_foo(foo=self.attr, x=y)
return b
m = MyModule()
inputs = (torch.ones(2, 3),)
ep = export(m, inputs, strict=False).run_decompositions({})
without_token_ep = _remove_effect_tokens(ep)
self.assertExpectedInline(
without_token_ep.graph_module.code.strip(),
"""\
def forward(self, obj_attr, x):
takes_foo_tuple_return_default = torch.ops._TorchScriptTesting.takes_foo_tuple_return.default(foo = obj_attr, x = x); x = None
getitem_1 = takes_foo_tuple_return_default[0]
getitem_2 = takes_foo_tuple_return_default[1]; takes_foo_tuple_return_default = None
add = torch.ops.aten.add.Tensor(getitem_1, getitem_2); getitem_1 = getitem_2 = None
takes_foo_default = torch.ops._TorchScriptTesting.takes_foo.default(foo = obj_attr, x = add); obj_attr = add = None
return (takes_foo_default,)""", # noqa: B950
)
def test_fakify_script_objects(self):
for m in [
ModelsWithScriptObjectAttr.Simple(),
ModelsWithScriptObjectAttr.SimpleWithAttrInContainer(),
ModelsWithScriptObjectAttr.NestedWithAttrInContainer(),
ModelsWithScriptObjectAttr.MoreNestedWithAttrInContainer(),
]:
constant_attrs = _gather_constant_attrs(m)
fake_mode = FakeTensorMode(
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
with _fakify_script_objects(m, (), {}, fake_mode) as (
_,
_,
_,
fake_constant_attrs,
fake_to_real,
):
self.assertEqual(len(fake_constant_attrs), len(constant_attrs))
for fake_obj, fqn in fake_constant_attrs.items():
self.assertEqual(constant_attrs[fake_to_real[fake_obj]], fqn)
# TODO: _gather_constants doesn't recursively look into the pytree containers.
@unittest.expectedFailure
def test_fakify_script_objects_properly_handle_containers(self):
m = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
fake_mode = FakeTensorMode(
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
with _fakify_script_objects(m, (), {}, fake_mode) as (
_,
_,
_,
fake_constant_attrs,
_,
):
self.assertTrue("attr" in fake_constant_attrs.values())
self.assertTrue("pytree_attr2" in fake_constant_attrs.values())
def test_runtime_assert_inline_constraints_for_item(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
b = x.item()
torch._check(b >= 2)
torch._check(b <= 5)
return b
x = torch.tensor([2])
mod = M()
ep = export(mod, (x,), strict=True)
with self.assertRaisesRegex(
RuntimeError, r"Runtime assertion failed for expression u[\d+] \<\= 5"
):
ep.module()(torch.tensor([6]))
new_inp = torch.tensor([5])
self.assertEqual(mod(new_inp), ep.module()(new_inp))
def test_runtime_assert_inline_constraints_for_nonzero(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
b = x.nonzero()
torch._check(b.shape[0] >= 3)
torch._check(b.shape[0] <= 5)
return b
x = torch.tensor([2, 1, 2, 3, 5, 0])
mod = M()
dim0_x = torch.export.Dim("dim0_x")
ep = torch.export.export(
mod, (x,), dynamic_shapes={"x": {0: dim0_x}}, strict=True
)
num_assert = count_call_function(
ep.graph, torch.ops.aten._assert_scalar.default
)
self.assertEqual(num_assert, 2)
num_constrain_range = count_call_function(
ep.graph, torch.ops.aten.sym_constrain_range.default
)
self.assertEqual(num_constrain_range, 0)
with self.assertRaisesRegex(
RuntimeError,
r"Runtime assertion failed for expression u[\d+] \>\= 3",
):
ep.module()(torch.tensor([1, 1, 0, 0, 0]))
with self.assertRaisesRegex(
RuntimeError,
r"Runtime assertion failed for expression u[\d+] \<\= 5",
):
ep.module()(torch.ones(6))
new_inp = torch.tensor([1, 1, 1, 1])
self.assertEqual(mod(new_inp), ep.module()(new_inp))
@unittest.skipIf(IS_WINDOWS, "Windows not supported")
@unittest.expectedFailure
# TODO(pianpwk): add back runtime asserts to subgraphs
def test_runtime_assert_inline_constraints_for_cond(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, pred, x, y):
def true_fn(x, y):
b = x.item()
torch._check(b >= 2)
torch._check(b <= 5)
return x - b
def false_fn(x, y):
c = y.item()
torch._check(c >= 2)
torch._check(c <= 5)
return y - c
ret = cond(pred, true_fn, false_fn, [x, y])
return ret
x = torch.tensor([2])
y = torch.tensor([5])
mod = M()
ep = export(mod, (torch.tensor(True), x, y), strict=True)
with self.assertRaisesRegex(
RuntimeError, "is outside of inline constraint \\[2, 5\\]."
):
ep.module()(torch.tensor(False), torch.tensor([6]), torch.tensor([6]))
    def test_math_ops(self) -> None:
        """math.ceil/floor on tensor items survive export and the base pass."""
        class Module(torch.nn.Module):
            def forward(self, x):
                return (
                    torch.tensor([math.ceil(x.item())]),
                    torch.tensor([math.floor(x.item())]),
                )
        func = Module()
        x = torch.randn(1, dtype=torch.float32)
        ep = torch.export.export(func, args=(x,), strict=True)
        # Smoke-check: the deprecated base pass runs without raising.
        _ExportPassBaseDeprecatedDoNotUse()(ep.graph_module)
def test_predispatch_set_grad(self):
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS["op"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
submod_4 = self.submod_2
add_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(False, submod_4, sum_1); submod_4 = sum_1 = None
getitem = add_1[0]; add_1 = None
sub = torch.ops.aten.sub.Tensor(getitem, 1)
return pytree.tree_unflatten((getitem, sub), self._out_spec)
""",
)
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS["op_under_no_grad"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_4 = self.submod_1
sum_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_4, add); submod_4 = add = None
getitem = sum_1[0]; sum_1 = None
add_1 = torch.ops.aten.add.Tensor(getitem, 1); getitem = None
submod_5 = self.submod_3
sub = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_5, add_1); submod_5 = None
getitem_1 = sub[0]; sub = None
return pytree.tree_unflatten((add_1, getitem_1), self._out_spec)
""",
)
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS["ctx_manager"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
submod_3 = self.submod_1
add_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(False, submod_3, sum_1); submod_3 = sum_1 = None
getitem = add_1[0]; add_1 = None
sub = torch.ops.aten.sub.Tensor(getitem, 1)
return pytree.tree_unflatten((getitem, sub), self._out_spec)
""",
)
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS["ctx_manager_under_no_grad"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_5 = self.submod_1
sum_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_5, add); submod_5 = add = None
getitem = sum_1[0]; sum_1 = None
add_1 = torch.ops.aten.add.Tensor(getitem, 1); getitem = None
submod_6 = self.submod_3
sub = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_6, add_1); submod_6 = None
getitem_1 = sub[0]; sub = None
return pytree.tree_unflatten((add_1, getitem_1), self._out_spec)
""",
)
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS["ctx_manager_multi_dep"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add)
sum_1 = torch.ops.aten.sum.default(sin); sin = None
cos = torch.ops.aten.cos.default(add); add = None
sum_2 = torch.ops.aten.sum.default(cos); cos = None
submod_3 = self.submod_1
wrap_with_set_grad_enabled = torch.ops.higher_order.wrap_with_set_grad_enabled(False, submod_3, sum_1, sum_2); submod_3 = sum_1 = sum_2 = None
add_1 = wrap_with_set_grad_enabled[0]
add_2 = wrap_with_set_grad_enabled[1]; wrap_with_set_grad_enabled = None
sub = torch.ops.aten.sub.Tensor(add_1, 1)
sub_1 = torch.ops.aten.sub.Tensor(add_2, 1)
return pytree.tree_unflatten((add_1, add_2, sub, sub_1), self._out_spec)
""", # noqa: B950
)
mod_orig, mod, args = self.SET_GRAD_ENABLED_TESTS[
"ctx_manager_multi_dep_no_grad"
]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_5 = self.submod_1
wrap_with_set_grad_enabled = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_5, add); submod_5 = add = None
sum_1 = wrap_with_set_grad_enabled[0]
sum_2 = wrap_with_set_grad_enabled[1]; wrap_with_set_grad_enabled = None
add_1 = torch.ops.aten.add.Tensor(sum_1, 1); sum_1 = None
add_2 = torch.ops.aten.add.Tensor(sum_2, 1); sum_2 = None
submod_6 = self.submod_3
wrap_with_set_grad_enabled_1 = torch.ops.higher_order.wrap_with_set_grad_enabled(True, submod_6, add_1, add_2); submod_6 = None
sub = wrap_with_set_grad_enabled_1[0]
sub_1 = wrap_with_set_grad_enabled_1[1]; wrap_with_set_grad_enabled_1 = None
return pytree.tree_unflatten((add_1, add_2, sub, sub_1), self._out_spec)
""", # noqa: B950
)
def test_sequential_split(self):
for gm, args in self.SEQUENTIAL_SPLIT_INLINE_TESTS.values():
set_grad_counts = nodes_count(gm.graph.nodes, _is_set_grad_enabled_node)
new_gm = sequential_split(gm, _is_set_grad_enabled_node)
new_set_grad_counts = nodes_count(
new_gm.graph.nodes, _is_set_grad_enabled_sub_mod
)
self.assertEqual(set_grad_counts, new_set_grad_counts)
self.assertEqual(gm(*args), new_gm(*args))
def test_sequential_split_graph(self):
gm, args = self.SEQUENTIAL_SPLIT_INLINE_TESTS["multi_dep_step2"]
new_gm = sequential_split(gm, _is_set_grad_enabled_node)
self.assertEqual(gm(*args), new_gm(*args))
self.assertExpectedInline(
new_gm.code.strip("\n"),
"""\
def forward(self, x1, x2):
x1, x2, = fx_pytree.tree_flatten_spec(([x1, x2], {}), self._in_spec)
submod_0 = self.submod_0(x1, x2); submod_0 = None
submod_1 = self.submod_1(x1, x2); x1 = x2 = None
getitem = submod_1[0]
getitem_1 = submod_1[1]; submod_1 = None
submod_2 = self.submod_2(getitem, getitem_1); getitem = getitem_1 = None
getitem_2 = submod_2[0]
getitem_3 = submod_2[1]; submod_2 = None
submod_3 = self.submod_3(getitem_2, getitem_3); getitem_2 = getitem_3 = None
getitem_4 = submod_3[0]
getitem_5 = submod_3[1]; submod_3 = None
submod_4 = self.submod_4(getitem_4, getitem_5)
getitem_6 = submod_4[0]
getitem_7 = submod_4[1]; submod_4 = None
return pytree.tree_unflatten((getitem_4, getitem_5, getitem_6, getitem_7), self._out_spec)
""",
)
self.assertExpectedInline(
new_gm.submod_1.code.strip("\n"),
"""\
def forward(self, x1, x2):
_set_grad_enabled = torch._C._set_grad_enabled(True); _set_grad_enabled = None
add = torch.ops.aten.add.Tensor(x1, 1); x1 = None
add_1 = torch.ops.aten.add.Tensor(x2, 1); x2 = None
return (add, add_1)
""",
)
self.assertExpectedInline(
new_gm.submod_2.code.strip("\n"),
"""\
def forward(self, add, add_1):
_set_grad_enabled_1 = torch._C._set_grad_enabled(False); _set_grad_enabled_1 = None
sin = torch.ops.aten.sin.default(add); add = None
cos = torch.ops.aten.cos.default(add_1); add_1 = None
return (sin, cos)
""",
)
self.assertExpectedInline(
new_gm.submod_3.code.strip("\n"),
"""\
def forward(self, sin, cos):
_set_grad_enabled_2 = torch._C._set_grad_enabled(True); _set_grad_enabled_2 = None
add_2 = torch.ops.aten.add.Tensor(sin, 1); sin = None
add_3 = torch.ops.aten.add.Tensor(cos, 1); cos = None
return (add_2, add_3)
""",
)
def test_predispatch_autocast_and_set_grad(self):
mod_orig, mod, args = self.MIXED_AUTOCAST_SET_GRAD_TESTS["multi_ctx_manager"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
submod_3 = self.submod_2
wrap_with_set_grad_enabled = torch.ops.higher_order.wrap_with_set_grad_enabled(False, submod_3, sin); submod_3 = sin = None
add_1 = wrap_with_set_grad_enabled[0]
sub = wrap_with_set_grad_enabled[1]; wrap_with_set_grad_enabled = None
return pytree.tree_unflatten((add_1, sub), self._out_spec)
""",
)
self.assertExpectedInline(
mod.submod_2.code.strip("\n"),
"""\
def forward(self, sin):
cos = torch.ops.aten.cos.default(sin); sin = None
submod_3 = self.submod_1
add_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, False, None, submod_3, cos); submod_3 = cos = None
getitem = add_1[0]; add_1 = None
sub = torch.ops.aten.sub.Tensor(getitem, 1)
return (getitem, sub)
""",
)
self.assertExpectedInline(
mod.submod_2.submod_1.code.strip("\n"),
"""\
def forward(self, cos):
add_1 = torch.ops.aten.add.Tensor(cos, 1); cos = None
return (add_1,)
""",
)
def test_predispatch_autocast(self):
mod_orig, mod, args = self.WITH_AUTOCAST_TESTS["ctx_manager_nested"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_3 = self.submod_1
add_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_3, add); submod_3 = add = None
getitem = add_1[0]; add_1 = None
submod_4 = self.submod_2
sub = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_4, getitem); submod_4 = None
getitem_1 = sub[0]; sub = None
return pytree.tree_unflatten((getitem, getitem_1), self._out_spec)
""",
)
self.assertExpectedInline(
mod.submod_1.code.strip("\n"),
"""\
def forward(self, add):
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
submod_2 = self.submod_1
add_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, False, None, submod_2, sum_1); submod_2 = sum_1 = None
getitem = add_1[0]; add_1 = None
return (getitem,)
""",
)
mod_orig, mod, args = self.WITH_AUTOCAST_TESTS["ctx_manager"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_4 = self.submod_1
sum_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_4, add); submod_4 = add = None
getitem = sum_1[0]; sum_1 = None
submod_5 = self.submod_2
add_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, False, None, submod_5, getitem); submod_5 = getitem = None
getitem_1 = add_1[0]; add_1 = None
submod_6 = self.submod_3
sub = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_6, getitem_1); submod_6 = None
getitem_2 = sub[0]; sub = None
return pytree.tree_unflatten((getitem_1, getitem_2), self._out_spec)
""",
)
self.assertExpectedInline(
mod.submod_1.code.strip("\n"),
"""\
def forward(self, add):
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
return (sum_1,)
""",
)
self.assertExpectedInline(
mod.submod_2.code.strip("\n"),
"""\
def forward(self, sum_1):
add_1 = torch.ops.aten.add.Tensor(sum_1, 1); sum_1 = None
return (add_1,)
""",
)
self.assertExpectedInline(
mod.submod_3.code.strip("\n"),
"""\
def forward(self, add_1):
sub = torch.ops.aten.sub.Tensor(add_1, 1); add_1 = None
return (sub,)
""",
)
mod_orig, mod, args = self.WITH_AUTOCAST_TESTS["ctx_manager_multi_dep"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_4 = self.submod_1
wrap_with_autocast = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_4, add); submod_4 = add = None
sum_1 = wrap_with_autocast[0]
sum_2 = wrap_with_autocast[1]; wrap_with_autocast = None
submod_5 = self.submod_2
wrap_with_autocast_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, False, None, submod_5, sum_1, sum_2); submod_5 = sum_1 = sum_2 = None
add_1 = wrap_with_autocast_1[0]
add_2 = wrap_with_autocast_1[1]; wrap_with_autocast_1 = None
submod_6 = self.submod_3
wrap_with_autocast_2 = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_6, add_1, add_2); submod_6 = None
sub = wrap_with_autocast_2[0]
sub_1 = wrap_with_autocast_2[1]; wrap_with_autocast_2 = None
return pytree.tree_unflatten((add_1, add_2, sub, sub_1), self._out_spec)
""", # noqa: B950
)
self.assertExpectedInline(
mod.submod_1.code.strip("\n"),
"""\
def forward(self, add):
sin = torch.ops.aten.sin.default(add)
sum_1 = torch.ops.aten.sum.default(sin); sin = None
cos = torch.ops.aten.cos.default(add); add = None
sum_2 = torch.ops.aten.sum.default(cos); cos = None
return (sum_1, sum_2)
""",
)
self.assertExpectedInline(
mod.submod_2.code.strip("\n"),
"""\
def forward(self, sum_1, sum_2):
add_1 = torch.ops.aten.add.Tensor(sum_1, 1); sum_1 = None
add_2 = torch.ops.aten.add.Tensor(sum_2, 1); sum_2 = None
return (add_1, add_2)
""",
)
self.assertExpectedInline(
mod.submod_3.code.strip("\n"),
"""\
def forward(self, add_1, add_2):
sub = torch.ops.aten.sub.Tensor(add_1, 1); add_1 = None
sub_1 = torch.ops.aten.sub.Tensor(add_2, 1); add_2 = None
return (sub, sub_1)
""",
)
mod_orig, mod, args = self.WITH_AUTOCAST_TESTS["ctx_manager_split"]
self._check_node_users_in_the_same_graph(mod)
self.assertEqual(mod_orig(*args), mod(*args))
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
def forward(self, x):
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_4 = self.submod_1
sum_1 = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_4, add); submod_4 = add = None
getitem = sum_1[0]; sum_1 = None
add_1 = torch.ops.aten.add.Tensor(getitem, 1); getitem = None
submod_5 = self.submod_3
sub = torch.ops.higher_order.wrap_with_autocast('cpu', None, True, None, submod_5, add_1); submod_5 = None
getitem_1 = sub[0]; sub = None
return pytree.tree_unflatten((add_1, getitem_1), self._out_spec)
""",
)
self.assertExpectedInline(
mod.submod_1.code.strip("\n"),
"""\
def forward(self, add):
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
return (sum_1,)
""",
)
self.assertExpectedInline(
mod.submod_3.code.strip("\n"),
"""\
def forward(self, add_1):
sub = torch.ops.aten.sub.Tensor(add_1, 1); add_1 = None
return (sub,)
""",
)
    def test_inline_(self):
        """sequential_split followed by node_inline_ round-trips the module."""
        for gm, args in self.SEQUENTIAL_SPLIT_INLINE_TESTS.values():
            before_str = gm.print_readable(print_output=False)
            new_gm = sequential_split(gm, _is_set_grad_enabled_node)
            # Inline every call_module submodule the split produced ...
            nodes_map(
                new_gm.graph.nodes,
                lambda node: node_inline_(node) if node.op == "call_module" else node,
            )
            after_inline_str = new_gm.print_readable(print_output=False)
            # ... which should reproduce the original module verbatim.
            self.assertEqual(before_str, after_inline_str)
            new_gm._guards_fn = gm._guards_fn
            self.assertEqual(gm(*args), new_gm(*args))
def test_remove_auto_functionalized_pass(self) -> None:
with _scoped_library("DO_NOT_USE_TEST_ONLY", "DEF") as lib:
lib.define("custom_mutator(Tensor x, Tensor(a!) y) -> Tensor")
@impl(lib, "custom_mutator", "Meta")
def custom_mutator_meta(
x: torch.Tensor,
y: torch.Tensor,
) -> torch.Tensor:
return torch.empty_like(x)
@impl(lib, "custom_mutator", "CompositeExplicitAutograd")
def custom_mutator(
x: torch.Tensor,
y: torch.Tensor,
) -> torch.Tensor:
return x + y.add_(1)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.state = torch.nn.Buffer(torch.zeros(1))
def forward(self, x):
return torch.ops.DO_NOT_USE_TEST_ONLY.custom_mutator(x, self.state)
mod = M()
x = torch.randn([3, 3])
ep = export(mod, (x,), strict=True)
inplace_ep = unsafe_remove_auto_functionalized_pass(ep)
nodes = inplace_ep.graph.nodes
for node in nodes:
if node.op == "call_function":
self.assertFalse(node.target is auto_functionalized)
self.assertFalse(node.target is operator.getitem)
for spec in inplace_ep.graph_signature.output_specs:
self.assertFalse("getitem" in spec.arg.name)
def test_remove_auto_functionalized_pass_tuple(self) -> None:
with _scoped_library("DO_NOT_USE_TEST_ONLY", "DEF") as lib:
lib.define(
"custom_mutator_tuple(Tensor x, Tensor(a!) y) -> (Tensor, Tensor)"
)
@impl(lib, "custom_mutator_tuple", "Meta")
def custom_mutator_tuple_meta(
x: torch.Tensor,
y: torch.Tensor,
):
return (torch.empty_like(x), torch.empty_like(x))
@impl(lib, "custom_mutator_tuple", "CompositeExplicitAutograd")
def custom_mutator_tuple(
x: torch.Tensor,
y: torch.Tensor,
):
return (x, x + y.add_(1))
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.state = torch.nn.Buffer(torch.zeros(1))
def forward(self, x):
return torch.ops.DO_NOT_USE_TEST_ONLY.custom_mutator_tuple(
x, self.state
)
mod = M()
x = torch.randn([3, 3])
ep = export(mod, (x,), strict=True).run_decompositions({})
inplace_ep = unsafe_remove_auto_functionalized_pass(ep)
graph_text = str(inplace_ep.graph)
self.assertExpectedInline(
graph_text,
"""\
graph():
%b_state : [num_users=2] = placeholder[target=b_state]
%x : [num_users=1] = placeholder[target=x]
%custom_mutator_tuple_default : [num_users=2] = call_function[target=torch.ops.DO_NOT_USE_TEST_ONLY.custom_mutator_tuple.\
default](args = (%x, %b_state), kwargs = {})
%getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%custom_mutator_tuple_default, 0), kwargs = {})
%getitem_4 : [num_users=1] = call_function[target=operator.getitem](args = (%custom_mutator_tuple_default, 1), kwargs = {})
return (b_state, getitem_3, getitem_4)""",
)
@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_move_device_to(self):
class M(torch.nn.Module):
def forward(self, x):
x = torch.ops.aten.to.device(x, device="cuda:0", dtype=torch.float32)
return x + x
ep = torch.export.export(M(), (torch.ones(3),))
ep = move_to_device_pass(ep, "cuda")
ep.graph_module.recompile()
self.assertExpectedInline(
ep.graph_module.code.strip("\n"),
"""\
def forward(self, x):
_assert_tensor_metadata_default = torch.ops.aten._assert_tensor_metadata.default(x, dtype = torch.float32, device = 'cuda', layout = torch.strided); _assert_tensor_metadata_default = None
to = torch.ops.aten.to.device(x, 'cuda', torch.float32); x = None
add = torch.ops.aten.add.Tensor(to, to); to = None
return (add,)
""", # noqa: B950
)
@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_move_device_submod(self):
class M(torch.nn.Module):
def forward(self, x):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
x = x.to(device="cuda:0")
return x + x
ep = torch.export.export(M(), (torch.ones(3),))
ep = move_to_device_pass(ep, "cuda")
ep.graph_module.submod_1.recompile()
self.assertExpectedInline(
ep.graph_module.submod_1.code.strip("\n"),
"""\
def forward(self, arg0_1):
_assert_tensor_metadata_default = torch.ops.aten._assert_tensor_metadata.default(arg0_1, dtype = torch.float32, device = 'cuda', layout = torch.strided); _assert_tensor_metadata_default = None
to = torch.ops.aten.to.dtype_layout(arg0_1, dtype = torch.float32, layout = torch.strided, device = 'cuda'); arg0_1 = None
add = torch.ops.aten.add.Tensor(to, to); to = None
return (add,)
""", # noqa: B950
)
@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_move_to_device_pass(self):
class Model(torch.nn.Module):
def __init__(self, size=4, h_dim=10):
super().__init__()
self.rnn = torch.nn.GRU(size, h_dim, batch_first=True)
def forward(self, x):
_, states = self.rnn(x)
return states
# move the exported program from cpu to cuda:0
mod = Model()
example_inputs = (torch.rand(1, 10, 4),)
ep = export(mod, example_inputs, strict=True)
location = torch.device("cuda:0")
ep = move_to_device_pass(ep, location=location)
gm = ep.module()
test_inputs = (torch.rand(1, 10, 4).to("cuda:0"),)
outputs = gm(*test_inputs)
self.assertEqual(outputs.device, torch.device("cuda:0"))
# move it back to cpu
location = "cpu"
ep = move_to_device_pass(ep, location=location)
gm = ep.module()
test_inputs = (torch.rand(1, 10, 4).to("cpu"),)
outputs = gm(*test_inputs)
self.assertEqual(outputs.device, torch.device("cpu"))
# move it to cuda:0 again
location = {"cpu": "cuda:0"}
ep = move_to_device_pass(ep, location=location)
gm = ep.module()
test_inputs = (torch.rand(1, 10, 4).to("cuda:0"),)
outputs = gm(*test_inputs)
self.assertEqual(outputs.device, torch.device("cuda:0"))
@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_move_device_example_inputs(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, x, y, z):
return self.linear(x) + y + z
# Create model with example inputs on CPU
mod = Model()
example_args = (torch.rand(4, 4), torch.rand(4, 4))
example_kwargs = {"z": torch.tensor([1.0, 2.0, 3.0, 4.0])}
# Export with example inputs
ep = export(mod, example_args, example_kwargs)
# Verify initial state - all tensors should be on CPU
self.assertEqual(ep.example_inputs[0][0].device, torch.device("cpu"))
self.assertEqual(ep.example_inputs[0][1].device, torch.device("cpu"))
self.assertEqual(ep.example_inputs[1]["z"].device, torch.device("cpu"))
# Move to CUDA
location = torch.device("cuda:0")
ep_cuda = move_to_device_pass(ep, location=location)
# Verify example_inputs moved to CUDA
self.assertEqual(ep_cuda.example_inputs[0][0].device, torch.device("cuda:0"))
self.assertEqual(ep_cuda.example_inputs[0][1].device, torch.device("cuda:0"))
self.assertEqual(ep_cuda.example_inputs[1]["z"].device, torch.device("cuda:0"))
def test_constant_folding_pass(self):
from torch.ao.quantization.observer import MappingType, PerGroup, PerToken
from torch.ao.quantization.pt2e._affine_quantization import (
AffineQuantizedMinMaxObserver,
)
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer import (
QuantizationAnnotation,
QuantizationSpec,
Quantizer,
)
class BackendAQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
for node in model.graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten.linear.default
):
input_act = node.args[0]
assert isinstance(input_act, torch.fx.Node)
weight = node.args[1]
assert isinstance(weight, torch.fx.Node)
act_qspec = QuantizationSpec(
dtype=torch.uint8,
quant_min=0,
quant_max=255,
qscheme=None,
is_dynamic=False,
observer_or_fake_quant_ctr=AffineQuantizedMinMaxObserver.with_args(
# TODO: maybe align the arg name here
target_dtype=torch.uint8,
mapping_type=MappingType.SYMMETRIC,
granularity=PerToken(),
),
)
weight_qspec = QuantizationSpec(
dtype=torch.uint8,
quant_min=0,
quant_max=255,
qscheme=None,
is_dynamic=False,
observer_or_fake_quant_ctr=AffineQuantizedMinMaxObserver.with_args(
target_dtype=torch.uint8,
mapping_type=MappingType.SYMMETRIC,
granularity=PerGroup(group_size=128),
),
)
node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: act_qspec,
weight: weight_qspec,
},
_annotated=True,
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(128, 20)
def forward(self, x):
return self.linear(x)
example_inputs = (torch.randn(5, 128),)
model = M()
quantizer = BackendAQuantizer()
m = torch.export.export(model.eval(), example_inputs, strict=True).module()
m = prepare_pt2e(m, quantizer)
# Calibration
m(*example_inputs)
# Get the quantized model
m_fold = copy.deepcopy(m)
m_fold = convert_pt2e(m_fold, fold_quantize=True)
# If fold, check the graph only contains frozed params and no linear_weight
FileCheck().check("_frozen_param0").check_not("linear_weight").run(m_fold.code)
m_not_fold = copy.deepcopy(m)
m_not_fold = convert_pt2e(m_not_fold, fold_quantize=False)
# If not fold, check the graph doesn't contain frozed params and contain linear_weight
FileCheck().check_not("_frozen_param0").check("linear_weight").run(
m_not_fold.code
)
if __name__ == "__main__":
run_tests()
| TestPasses |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 40185,
"end": 42096
} | class ____(nn.Module):
"""
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return (data - loc) / scale, loc, scale
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST
| PatchTSTStdScaler |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 24111,
"end": 24681
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a block schema."""
fields: dict[str, Any] = Field(
default_factory=dict, description="The block schema's field schema"
)
block_type_id: Optional[UUID] = Field(default=None)
capabilities: list[str] = Field(
default_factory=list,
description="A list of Block capabilities",
)
version: str = Field(
default=objects.DEFAULT_BLOCK_SCHEMA_VERSION,
description="Human readable identifier for the block schema",
)
| BlockSchemaCreate |
python | numpy__numpy | benchmarks/benchmarks/bench_core.py | {
"start": 3638,
"end": 4442
} | class ____(Benchmark):
param_names = ['numaxes', 'size', 'dtype']
params = [
[1, 2, 3],
[100, 10000, 1000000],
[bool, np.int8, np.int16, np.int32, np.int64, np.float32,
np.float64, str, object]
]
def setup(self, numaxes, size, dtype):
self.x = np.arange(numaxes * size).reshape(numaxes, size)
self.x = (self.x % 3).astype(dtype)
def time_count_nonzero(self, numaxes, size, dtype):
np.count_nonzero(self.x)
def time_count_nonzero_axis(self, numaxes, size, dtype):
np.count_nonzero(self.x, axis=self.x.ndim - 1)
def time_count_nonzero_multi_axis(self, numaxes, size, dtype):
if self.x.ndim >= 2:
np.count_nonzero(self.x, axis=(
self.x.ndim - 1, self.x.ndim - 2))
| CountNonzero |
python | python__mypy | mypy/nodes.py | {
"start": 55753,
"end": 56252
} | class ____(Statement):
__slots__ = ("expr", "body", "else_body")
__match_args__ = ("expr", "body", "else_body")
expr: Expression
body: Block
else_body: Block | None
def __init__(self, expr: Expression, body: Block, else_body: Block | None) -> None:
super().__init__()
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_while_stmt(self)
| WhileStmt |
python | cherrypy__cherrypy | cherrypy/lib/httputil.py | {
"start": 4629,
"end": 6148
} | class ____(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
"""Initialize an HTTP header value representation."""
self.value = value
if params is None:
params = {}
self.params = params
def __cmp__(self, other):
"""Compare current HTTP header to another by value only."""
return builtins.cmp(self.value, other.value)
def __lt__(self, other):
"""Check if this header value is less than the other."""
return self.value < other.value
def __str__(self):
"""Render the HTTP header value as a string."""
p = [';%s=%s' % (k, v) for k, v in self.params.items()]
return str('%s%s' % (self.value, ''.join(p)))
def __bytes__(self):
"""Turn the HTTP header value string representation to bytes."""
return ntob(self.__str__())
def __unicode__(self):
"""Render the HTTP header value as a string."""
return ntou(self.__str__())
@staticmethod
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
initial_value, params = parse_header(elementstr)
return initial_value, params
@classmethod
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
q_separator = re.compile(r'; *q *=')
| HeaderElement |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 13319,
"end": 13870
} | class ____(ASTLiteral):
def __eq__(self, other: object) -> bool:
return isinstance(other, ASTPointerLiteral)
def __hash__(self) -> int:
return hash('nullptr')
def _stringify(self, transform: StringifyTransform) -> str:
return 'nullptr'
def get_id(self, version: int) -> str:
return 'LDnE'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('nullptr', 'nullptr')
| ASTPointerLiteral |
python | TheAlgorithms__Python | data_structures/binary_tree/binary_search_tree_recursive.py | {
"start": 523,
"end": 7400
} | class ____:
def __init__(self) -> None:
self.root: Node | None = None
def empty(self) -> None:
"""
Empties the tree
>>> t = BinarySearchTree()
>>> assert t.root is None
>>> t.put(8)
>>> assert t.root is not None
"""
self.root = None
def is_empty(self) -> bool:
"""
Checks if the tree is empty
>>> t = BinarySearchTree()
>>> t.is_empty()
True
>>> t.put(8)
>>> t.is_empty()
False
"""
return self.root is None
def put(self, label: int) -> None:
"""
Put a new node in the tree
>>> t = BinarySearchTree()
>>> t.put(8)
>>> assert t.root.parent is None
>>> assert t.root.label == 8
>>> t.put(10)
>>> assert t.root.right.parent == t.root
>>> assert t.root.right.label == 10
>>> t.put(3)
>>> assert t.root.left.parent == t.root
>>> assert t.root.left.label == 3
"""
self.root = self._put(self.root, label)
def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node:
if node is None:
node = Node(label, parent)
elif label < node.label:
node.left = self._put(node.left, label, node)
elif label > node.label:
node.right = self._put(node.right, label, node)
else:
msg = f"Node with label {label} already exists"
raise ValueError(msg)
return node
def search(self, label: int) -> Node:
"""
Searches a node in the tree
>>> t = BinarySearchTree()
>>> t.put(8)
>>> t.put(10)
>>> node = t.search(8)
>>> assert node.label == 8
>>> node = t.search(3)
Traceback (most recent call last):
...
ValueError: Node with label 3 does not exist
"""
return self._search(self.root, label)
def _search(self, node: Node | None, label: int) -> Node:
if node is None:
msg = f"Node with label {label} does not exist"
raise ValueError(msg)
elif label < node.label:
node = self._search(node.left, label)
elif label > node.label:
node = self._search(node.right, label)
return node
def remove(self, label: int) -> None:
"""
Removes a node in the tree
>>> t = BinarySearchTree()
>>> t.put(8)
>>> t.put(10)
>>> t.remove(8)
>>> assert t.root.label == 10
>>> t.remove(3)
Traceback (most recent call last):
...
ValueError: Node with label 3 does not exist
"""
node = self.search(label)
if node.right and node.left:
lowest_node = self._get_lowest_node(node.right)
lowest_node.left = node.left
lowest_node.right = node.right
node.left.parent = lowest_node
if node.right:
node.right.parent = lowest_node
self._reassign_nodes(node, lowest_node)
elif not node.right and node.left:
self._reassign_nodes(node, node.left)
elif node.right and not node.left:
self._reassign_nodes(node, node.right)
else:
self._reassign_nodes(node, None)
def _reassign_nodes(self, node: Node, new_children: Node | None) -> None:
if new_children:
new_children.parent = node.parent
if node.parent:
if node.parent.right == node:
node.parent.right = new_children
else:
node.parent.left = new_children
else:
self.root = new_children
def _get_lowest_node(self, node: Node) -> Node:
if node.left:
lowest_node = self._get_lowest_node(node.left)
else:
lowest_node = node
self._reassign_nodes(node, node.right)
return lowest_node
def exists(self, label: int) -> bool:
"""
Checks if a node exists in the tree
>>> t = BinarySearchTree()
>>> t.put(8)
>>> t.put(10)
>>> t.exists(8)
True
>>> t.exists(3)
False
"""
try:
self.search(label)
return True
except ValueError:
return False
def get_max_label(self) -> int:
"""
Gets the max label inserted in the tree
>>> t = BinarySearchTree()
>>> t.get_max_label()
Traceback (most recent call last):
...
ValueError: Binary search tree is empty
>>> t.put(8)
>>> t.put(10)
>>> t.get_max_label()
10
"""
if self.root is None:
raise ValueError("Binary search tree is empty")
node = self.root
while node.right is not None:
node = node.right
return node.label
def get_min_label(self) -> int:
"""
Gets the min label inserted in the tree
>>> t = BinarySearchTree()
>>> t.get_min_label()
Traceback (most recent call last):
...
ValueError: Binary search tree is empty
>>> t.put(8)
>>> t.put(10)
>>> t.get_min_label()
8
"""
if self.root is None:
raise ValueError("Binary search tree is empty")
node = self.root
while node.left is not None:
node = node.left
return node.label
def inorder_traversal(self) -> Iterator[Node]:
"""
Return the inorder traversal of the tree
>>> t = BinarySearchTree()
>>> [i.label for i in t.inorder_traversal()]
[]
>>> t.put(8)
>>> t.put(10)
>>> t.put(9)
>>> [i.label for i in t.inorder_traversal()]
[8, 9, 10]
"""
return self._inorder_traversal(self.root)
def _inorder_traversal(self, node: Node | None) -> Iterator[Node]:
if node is not None:
yield from self._inorder_traversal(node.left)
yield node
yield from self._inorder_traversal(node.right)
def preorder_traversal(self) -> Iterator[Node]:
"""
Return the preorder traversal of the tree
>>> t = BinarySearchTree()
>>> [i.label for i in t.preorder_traversal()]
[]
>>> t.put(8)
>>> t.put(10)
>>> t.put(9)
>>> [i.label for i in t.preorder_traversal()]
[8, 10, 9]
"""
return self._preorder_traversal(self.root)
def _preorder_traversal(self, node: Node | None) -> Iterator[Node]:
if node is not None:
yield node
yield from self._preorder_traversal(node.left)
yield from self._preorder_traversal(node.right)
| BinarySearchTree |
python | huggingface__transformers | src/transformers/models/trocr/modeling_trocr.py | {
"start": 12941,
"end": 17829
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: TrOCRConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = TrOCRAttention(
config,
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
if config.is_decoder:
self.encoder_attn = TrOCRAttention(
config,
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
kdim=config.cross_attention_hidden_size,
vdim=config.cross_attention_hidden_size,
dropout=config.attention_dropout,
is_decoder=True,
is_cross_attention=True,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| TrOCRDecoderLayer |
python | huggingface__transformers | tests/models/luke/test_modeling_luke.py | {
"start": 1414,
"end": 21504
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
entity_length=3,
mention_length=5,
use_attention_mask=True,
use_token_type_ids=True,
use_entity_ids=True,
use_entity_attention_mask=True,
use_entity_token_type_ids=True,
use_entity_position_ids=True,
use_labels=True,
vocab_size=99,
entity_vocab_size=10,
entity_emb_size=6,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
num_entity_classification_labels=9,
num_entity_pair_classification_labels=6,
num_entity_span_classification_labels=4,
use_entity_aware_attention=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.entity_length = entity_length
self.mention_length = mention_length
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_entity_ids = use_entity_ids
self.use_entity_attention_mask = use_entity_attention_mask
self.use_entity_token_type_ids = use_entity_token_type_ids
self.use_entity_position_ids = use_entity_position_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.entity_vocab_size = entity_vocab_size
self.entity_emb_size = entity_emb_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.num_entity_classification_labels = num_entity_classification_labels
self.num_entity_pair_classification_labels = num_entity_pair_classification_labels
self.num_entity_span_classification_labels = num_entity_span_classification_labels
self.scope = scope
self.use_entity_aware_attention = use_entity_aware_attention
self.encoder_seq_length = seq_length
self.key_length = seq_length
self.num_hidden_states_types = 2 # hidden_states and entity_hidden_states
def prepare_config_and_inputs(self):
# prepare words
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
# prepare entities
entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
entity_attention_mask = None
if self.use_entity_attention_mask:
entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length])
entity_token_type_ids = None
if self.use_token_type_ids:
entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size)
entity_position_ids = None
if self.use_entity_position_ids:
entity_position_ids = ids_tensor(
[self.batch_size, self.entity_length, self.mention_length], self.mention_length
)
sequence_labels = None
token_labels = None
choice_labels = None
entity_labels = None
entity_classification_labels = None
entity_pair_classification_labels = None
entity_span_classification_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels)
entity_pair_classification_labels = ids_tensor(
[self.batch_size], self.num_entity_pair_classification_labels
)
entity_span_classification_labels = ids_tensor(
[self.batch_size, self.entity_length], self.num_entity_span_classification_labels
)
config = self.get_config()
return (
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
)
def get_config(self):
return LukeConfig(
vocab_size=self.vocab_size,
entity_vocab_size=self.entity_vocab_size,
entity_emb_size=self.entity_emb_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
use_entity_aware_attention=self.use_entity_aware_attention,
)
def create_and_check_model(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
model = LukeModel(config=config)
model.to(torch_device)
model.eval()
# test with words + entities
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(
result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size)
)
# test with words only
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_classification_labels
model = LukeForMaskedLM(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=token_labels,
entity_labels=entity_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
if entity_ids is not None:
self.parent.assertEqual(
result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size)
)
else:
self.parent.assertIsNone(result.entity_logits)
def create_and_check_for_entity_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_classification_labels
model = LukeForEntityClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))
def create_and_check_for_entity_pair_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_pair_classification_labels
model = LukeForEntityClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_pair_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))
def create_and_check_for_entity_span_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_span_classification_labels
model = LukeForEntitySpanClassification(config)
model.to(torch_device)
model.eval()
entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
entity_start_positions=entity_start_positions,
entity_end_positions=entity_end_positions,
labels=entity_span_classification_labels,
)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels)
)
def create_and_check_for_question_answering(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
model = LukeForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_labels
model = LukeForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_labels
model = LukeForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=token_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_choices = self.num_choices
model = LukeForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_attention_mask = attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_entity_ids = entity_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_entity_token_type_ids = (
entity_token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
)
multiple_choice_entity_attention_mask = (
entity_attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
)
multiple_choice_entity_position_ids = (
entity_position_ids.unsqueeze(1).expand(-1, self.num_choices, -1, -1).contiguous()
)
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_attention_mask,
token_type_ids=multiple_choice_token_type_ids,
entity_ids=multiple_choice_entity_ids,
entity_attention_mask=multiple_choice_entity_attention_mask,
entity_token_type_ids=multiple_choice_entity_token_type_ids,
entity_position_ids=multiple_choice_entity_position_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
token_labels,
choice_labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"entity_ids": entity_ids,
"entity_token_type_ids": entity_token_type_ids,
"entity_attention_mask": entity_attention_mask,
"entity_position_ids": entity_position_ids,
}
return config, inputs_dict
@require_torch
| LukeModelTester |
python | fluentpython__example-code | 07-closure-deco/strategy_best4.py | {
"start": 1558,
"end": 3143
} | class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total() - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total(), self.due())
# BEGIN STRATEGY_BEST4
promos = [] # <1>
def promotion(promo_func): # <2>
promos.append(promo_func)
return promo_func
@promotion # <3>
def fidelity(order):
"""5% discount for customers with 1000 or more fidelity points"""
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
@promotion
def bulk_item(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
@promotion
def large_order(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
def best_promo(order): # <4>
"""Select best discount available
"""
return max(promo(order) for promo in promos)
# END STRATEGY_BEST4
| Order |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/general_tests/utils_tests/log_tests/test_single_handler.py | {
"start": 158,
"end": 1365
} | class ____(logging.Handler):
def __init__(self, records):
self.records = records
super().__init__()
def emit(self, record):
self.records.append(record)
def test_log_level_filtering():
records = []
critical_records = []
debug_logger_def = construct_single_handler_logger(
"debug_handler", "debug", LogTestHandler(records)
)
critical_logger_def = construct_single_handler_logger(
"critical_handler", "critical", LogTestHandler(critical_records)
)
loggers = [
logger_def.logger_fn(
dg.InitLoggerContext(
{},
logger_def,
job_def=dg.GraphDefinition(node_defs=[], name="test").to_job(),
run_id="",
)
)
for logger_def in [debug_logger_def, critical_logger_def]
]
log_manager = DagsterLogManager.create(loggers=loggers)
log_manager.debug("Hello, there!")
messages = [x.dagster_meta["orig_message"] for x in records]
assert "Hello, there!" in messages
critical_messages = [x.dagster_meta["orig_message"] for x in critical_records]
assert "Hello, there!" not in critical_messages
| LogTestHandler |
python | nmslib__hnswlib | tests/python/bindings_test.py | {
"start": 64,
"end": 2239
} | class ____(unittest.TestCase):
def testRandomSelf(self):
dim = 16
num_elements = 10000
# Generating sample data
data = np.float32(np.random.random((num_elements, dim)))
# Declaring index
p = hnswlib.Index(space='l2', dim=dim) # possible options are l2, cosine or ip
# Initiating index
# max_elements - the maximum number of elements, should be known beforehand
# (probably will be made optional in the future)
#
# ef_construction - controls index search speed/build speed tradeoff
# M - is tightly connected with internal dimensionality of the data
# strongly affects the memory consumption
p.init_index(max_elements=num_elements, ef_construction=100, M=16)
# Controlling the recall by setting ef:
# higher ef leads to better accuracy, but slower search
p.set_ef(10)
p.set_num_threads(4) # by default using all available cores
# We split the data in two batches:
data1 = data[:num_elements // 2]
data2 = data[num_elements // 2:]
print("Adding first batch of %d elements" % (len(data1)))
p.add_items(data1)
# Query the elements for themselves and measure recall:
labels, distances = p.knn_query(data1, k=1)
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data1))), 1.0, 3)
# Serializing and deleting the index:
index_path = 'first_half.bin'
print("Saving index to '%s'" % index_path)
p.save_index(index_path)
del p
# Re-initiating, loading the index
p = hnswlib.Index(space='l2', dim=dim) # you can change the sa
print("\nLoading index from '%s'\n" % index_path)
p.load_index(index_path)
print("Adding the second batch of %d elements" % (len(data2)))
p.add_items(data2)
# Query the elements for themselves and measure recall:
labels, distances = p.knn_query(data, k=1)
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data))), 1.0, 3)
os.remove(index_path)
| RandomSelfTestCase |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/when_directives_true/package.py | {
"start": 217,
"end": 884
} | class ____(Package):
"""Package that tests True when specs on directives."""
homepage = "http://www.example.com"
url = "http://www.example.com/example-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
patch(
"https://example.com/foo.patch",
sha256="abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234",
when=True,
)
extends("extendee", when=True)
depends_on("pkg-b", when=True)
conflicts("@1.0", when=True)
resource(
url="http://www.example.com/example-1.0-resource.tar.gz",
md5="0123456789abcdef0123456789abcdef",
when=True,
)
| WhenDirectivesTrue |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_caught.py | {
"start": 2194,
"end": 2270
} | class ____(six.with_metaclass(abc.ABCMeta, Exception)):
pass
| HasErrorInMRO |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator1.py | {
"start": 2619,
"end": 2894
} | class ____(TypedDict):
x: str
def generator12() -> Generator[TD1, None, None]:
yield {"x": "x"}
def generator13() -> Generator[TD1, None, None]:
# This should generate an error.
yield {"y": "x"}
def generator14() -> Iterator[TD1]:
yield {"x": "x"}
| TD1 |
python | pytorch__pytorch | torch/utils/data/datapipes/_typing.py | {
"start": 7252,
"end": 8382
} | class ____:
r"""Save type annotation in `param`."""
def __init__(self, param) -> None:
self.param = param
def __repr__(self) -> str:
return _type_repr(self.param)
def __eq__(self, other):
if isinstance(other, _DataPipeType):
return self.param == other.param
return NotImplemented
def __hash__(self):
return hash(self.param)
def issubtype(self, other):
if isinstance(other.param, _GenericAlias):
if getattr(other.param, "__origin__", None) is Generic:
return True
if isinstance(other, _DataPipeType):
return issubtype(self.param, other.param)
if isinstance(other, type):
return issubtype(self.param, other)
raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}")
def issubtype_of_instance(self, other):
return issubinstance(other, self.param)
# Default type for DataPipe without annotation
_T_co = TypeVar("_T_co", covariant=True)
# pyrefly: ignore [invalid-annotation]
_DEFAULT_TYPE = _DataPipeType(Generic[_T_co])
| _DataPipeType |
python | getsentry__sentry | src/sentry/api/endpoints/source_map_debug_blue_thunder_edition.py | {
"start": 2392,
"end": 2644
} | class ____(TypedDict):
source_file: None | (ScrapingResultSuccess | ScrapingResultNotAttempted | ScrapingResultFailure)
source_map: None | (ScrapingResultSuccess | ScrapingResultNotAttempted | ScrapingResultFailure)
| SourceMapScrapingProcessResult |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 9833,
"end": 10106
} | class ____(MPTTModel):
name = models.CharField(max_length=50)
custom_parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
class MPTTMeta:
parent_attr = "custom_parent"
| CustomParentAttrModel |
python | pandas-dev__pandas | pandas/tests/test_algos.py | {
"start": 34349,
"end": 46254
} | class ____:
def test_invalid(self):
msg = (
r"only list-like objects are allowed to be passed to isin\(\), "
r"you passed a `int`"
)
with pytest.raises(TypeError, match=msg):
algos.isin(1, 1)
with pytest.raises(TypeError, match=msg):
algos.isin(1, [1])
with pytest.raises(TypeError, match=msg):
algos.isin([1], 1)
def test_basic(self):
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), {1})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
arg = np.array(["a", "b"], dtype=object)
result = algos.isin(arg, ["a"])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(arg), Series(["a"]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(arg), {"a"})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arg, [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = date_range("20130101", periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = timedelta_range("1 day", periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
@pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
# Anything but object and we get all-False shortcut
dta = date_range("2013-01-01", periods=3)._values
arr = Series(dta.view("i8")).array.view(dtype1)
comps = arr.view("i8").astype(dtype)
result = algos.isin(comps, arr)
expected = np.zeros(comps.shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = date_range("20000101", periods=2000000, freq="s").values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
def test_isin_datetimelike_all_nat(self, dtype):
# GH#56427
dta = date_range("2013-01-01", periods=3)._values
arr = Series(dta.view("i8")).array.view(dtype)
arr[0] = NaT
result = algos.isin(arr, [NaT])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]"])
def test_isin_datetimelike_strings_returns_false(self, dtype):
# GH#53111
dta = date_range("2013-01-01", periods=3)._values
arr = Series(dta.view("i8")).array.view(dtype)
vals = [str(x) for x in arr]
res = algos.isin(arr, vals)
assert not res.any()
vals2 = np.array(vals, dtype=str)
res2 = algos.isin(arr, vals2)
assert not res2.any()
def test_isin_dt64tz_with_nat(self):
# the all-NaT values used to get inferred to tznaive, which was evaluated
# as non-matching GH#56427
dti = date_range("2016-01-01", periods=3, tz="UTC")
ser = Series(dti)
ser[0] = NaT
res = algos.isin(ser._values, [NaT])
exp = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(res, exp)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ["a", "b", "c"]
Sd = Series(Categorical([1]).from_codes(vals, cats))
St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
def test_categorical_isin(self):
vals = np.array([0, 1, 2, 0])
cats = ["a", "b", "c"]
cat = Categorical([1]).from_codes(vals, cats)
other = Categorical([1]).from_codes(np.array([0, 1]), cats)
expected = np.array([True, True, False, True])
result = algos.isin(cat, other)
tm.assert_numpy_array_equal(expected, result)
def test_same_nan_is_in(self):
# GH 22160
# nan is special, because from " a is b" doesn't follow "a == b"
# at least, isin() should follow python's "np.nan in [nan] == True"
# casting to -> np.float64 -> another float-object somewhere on
# the way could lead jeopardize this behavior
comps = np.array([np.nan], dtype=object) # could be casted to float64
values = [np.nan]
expected = np.array([True])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
def test_same_nan_is_in_large(self):
# https://github.com/pandas-dev/pandas/issues/22205
s = np.tile(1.0, 1_000_001)
s[0] = np.nan
result = algos.isin(s, np.array([np.nan, 1]))
expected = np.ones(len(s), dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_same_nan_is_in_large_series(self):
# https://github.com/pandas-dev/pandas/issues/22205
s = np.tile(1.0, 1_000_001)
series = Series(s)
s[0] = np.nan
result = series.isin(np.array([np.nan, 1]))
expected = Series(np.ones(len(s), dtype=bool))
tm.assert_series_equal(result, expected)
def test_same_object_is_in(self):
# GH 22160
# there could be special treatment for nans
# the user however could define a custom class
# with similar behavior, then we at least should
# fall back to usual python's behavior: "a in [a] == True"
class LikeNan:
def __eq__(self, other) -> bool:
return False
def __hash__(self):
return 0
a, b = LikeNan(), LikeNan()
arg = np.array([a], dtype=object)
# same object -> True
tm.assert_numpy_array_equal(algos.isin(arg, [a]), np.array([True]))
# different objects -> False
tm.assert_numpy_array_equal(algos.isin(arg, [b]), np.array([False]))
def test_different_nans(self):
# GH 22160
# all nans are handled as equivalent
comps = [float("nan")]
values = [float("nan")]
assert comps[0] is not values[0] # different nan-objects
# as list of python-objects:
result = algos.isin(np.array(comps), values)
tm.assert_numpy_array_equal(np.array([True]), result)
# as object-array:
result = algos.isin(
np.asarray(comps, dtype=object), np.asarray(values, dtype=object)
)
tm.assert_numpy_array_equal(np.array([True]), result)
# as float64-array:
result = algos.isin(
np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)
)
tm.assert_numpy_array_equal(np.array([True]), result)
def test_no_cast(self):
# GH 22160
# ensure 42 is not casted to a string
comps = np.array(["ss", 42], dtype=object)
values = ["42"]
expected = np.array([False, False])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
def test_different_nan_objects(self):
# GH 22119
comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)
vals = np.array([float("nan")], dtype=object)
expected = np.array([False, False, True])
result = algos.isin(comps, vals)
tm.assert_numpy_array_equal(expected, result)
def test_different_nans_as_float64(self):
# GH 21866
# create different nans from bit-patterns,
# these nans will land in different buckets in the hash-table
# if no special care is taken
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# check that NAN1 and NAN2 are equivalent:
arr = np.array([NAN1, NAN2], dtype=np.float64)
lookup1 = np.array([NAN1], dtype=np.float64)
result = algos.isin(arr, lookup1)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
lookup2 = np.array([NAN2], dtype=np.float64)
result = algos.isin(arr, lookup2)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
def test_isin_int_df_string_search(self):
"""Comparing df with int`s (1,2) with a string at isin() ("1")
-> should not match values because int 1 is not equal str 1"""
df = DataFrame({"values": [1, 2]})
result = df.isin(["1"])
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
def test_isin_nan_df_string_search(self):
"""Comparing df with nan value (np.nan,2) with a string at isin() ("NaN")
-> should not match values because np.nan is not equal str NaN"""
df = DataFrame({"values": [np.nan, 2]})
result = df.isin(np.array(["NaN"], dtype=object))
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
def test_isin_float_df_string_search(self):
"""Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245")
-> should not match values because float 1.4245 is not equal str 1.4245"""
df = DataFrame({"values": [1.4245, 2.32441]})
result = df.isin(np.array(["1.4245"], dtype=object))
expected_false = DataFrame({"values": [False, False]})
tm.assert_frame_equal(result, expected_false)
def test_isin_unsigned_dtype(self):
# GH#46485
ser = Series([1378774140726870442], dtype=np.uint64)
result = ser.isin([1378774140726870528])
expected = Series(False)
tm.assert_series_equal(result, expected)
| TestIsin |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 53056,
"end": 54291
} | class ____(SetMixin):
"""
Represents the values of the checked checkboxes in a group of
checkboxes with the same name.
"""
def __init__(self, group):
self.group = group
def __iter__(self):
return iter([
el.get('value')
for el in self.group
if 'checked' in el.attrib])
def add(self, value):
for el in self.group:
if el.get('value') == value:
el.set('checked', '')
break
else:
raise KeyError("No checkbox with value %r" % value)
def remove(self, value):
for el in self.group:
if el.get('value') == value:
if 'checked' in el.attrib:
del el.attrib['checked']
else:
raise KeyError(
"The checkbox with value %r was already unchecked" % value)
break
else:
raise KeyError(
"No checkbox with value %r" % value)
def __repr__(self):
return '<%s {%s} for checkboxes name=%r>' % (
self.__class__.__name__,
', '.join([repr(v) for v in self]),
self.group.name)
| CheckboxValues |
python | fluentpython__example-code-2e | 05-data-classes/dataclass/club_wrong.py | {
"start": 66,
"end": 140
} | class ____:
name: str
guests: list = []
# end::CLUBMEMBER[]
| ClubMember |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 377497,
"end": 378623
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateTeamDiscussion"""
__schema__ = github_schema
__field_names__ = ("id", "title", "body", "body_version", "pinned", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The Node ID of the discussion to modify."""
title = sgqlc.types.Field(String, graphql_name="title")
"""The updated title of the discussion."""
body = sgqlc.types.Field(String, graphql_name="body")
"""The updated text of the discussion."""
body_version = sgqlc.types.Field(String, graphql_name="bodyVersion")
"""The current version of the body content. If provided, this update
operation will be rejected if the given version does not match the
latest version on the server.
"""
pinned = sgqlc.types.Field(Boolean, graphql_name="pinned")
"""If provided, sets the pinned state of the updated discussion."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateTeamDiscussionInput |
python | getsentry__sentry | tests/sentry/workflow_engine/migrations/test_0087_relink_crons_to_compatible_issue_workflows.py | {
"start": 318,
"end": 20412
} | class ____(TestMigrations):
migrate_from = "0086_fix_cron_to_cron_workflow_links"
migrate_to = "0087_relink_crons_to_compatible_issue_workflows"
app = "workflow_engine"
def _create_issue_rule_with_workflow(
self,
project,
condition_data,
action_data=None,
frequency=5,
):
"""Helper to create an issue rule with workflow."""
if action_data is None:
action_data = [
{
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": 12345,
"targetType": "Team",
}
]
rule = self.create_project_rule(
project=project,
action_data=action_data,
condition_data=condition_data,
frequency=frequency,
)
workflow = IssueAlertMigrator(rule).run()
Rule.objects.filter(id=rule.id).update(source=RuleSource.ISSUE)
return rule, workflow
def _create_cron_detector(self, org, project, name, owner_user=None, owner_team=None):
"""Helper to create a cron detector with data source and monitor."""
detector = self.create_detector(
project=project,
name=f"detector-{name}",
type="monitor_check_in_failure",
)
monitor = self.create_monitor(
organization=org,
project=project,
name=name,
)
if owner_user:
monitor.owner_user_id = owner_user.id
monitor.owner_team_id = None
elif owner_team:
monitor.owner_team_id = owner_team.id
monitor.owner_user_id = None
else:
monitor.owner_user_id = None
monitor.owner_team_id = None
monitor.save()
data_source = DataSource.objects.create(
organization_id=org.id,
type="cron_monitor",
source_id=str(monitor.id),
)
DataSourceDetector.objects.create(
data_source=data_source,
detector=detector,
)
return detector, monitor
def setup_initial_state(self) -> None:
self.org = self.create_organization(name="test-org")
self.project1 = self.create_project(organization=self.org)
self.project2 = self.create_project(organization=self.org)
self.user1 = self.create_user(email="user1@example.com")
self.user2 = self.create_user(email="user2@example.com")
self.team1 = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.compatible_rule1, self.compatible_workflow1 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"},
],
)
# Another compatible workflow (duplicate conditions for dedup test)
self.compatible_rule2, self.compatible_workflow2 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"},
],
)
# Compatible workflow with different conditions
self.compatible_rule3, self.compatible_workflow3 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.every_event.EveryEventCondition"},
],
)
# Incompatible workflow with disallowed conditions
self.incompatible_rule1, self.incompatible_workflow1 = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"interval": "1m",
"value": 10,
},
{
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "level",
"match": "eq",
"value": "error",
},
],
)
)
# Compatible workflow in project2
self.project2_rule1, self.project2_workflow1 = self._create_issue_rule_with_workflow(
project=self.project2,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"},
],
)
# Unassigned workflow in project2 for detector3 (no owner)
self.project2_unassigned_rule, self.project2_unassigned_workflow = (
self._create_issue_rule_with_workflow(
project=self.project2,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetType": "Unassigned",
},
],
)
)
# Compatible workflow with same conditions as workflow1/2 but different actions (should NOT dedupe)
self.compatible_rule4, self.compatible_workflow4 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"},
],
action_data=[
{
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": 67890, # Different target
"targetType": "Member", # Different type
}
],
)
# Mixed compatible/incompatible conditions (should be incompatible overall)
self.mixed_rule1, self.mixed_workflow1 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"
}, # Compatible
{
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition", # Incompatible
"interval": "1m",
"value": 5,
},
],
)
# Compatible workflow with same conditions but different frequency (should NOT dedupe)
self.compatible_rule5, self.compatible_workflow5 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"},
],
frequency=60,
)
# Test other allowed conditions
self.compatible_rule6, self.compatible_workflow6 = self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.reappeared_event.ReappearedEventCondition"},
{
"id": "sentry.rules.filters.age_comparison.AgeComparisonFilter",
"comparison_type": "older",
"value": 30,
"time": "minute",
},
],
)
# Project3 with no cron detectors but has issue workflows
self.project3 = self.create_project(organization=self.org)
self.project3_rule1, self.project3_workflow1 = self._create_issue_rule_with_workflow(
project=self.project3,
condition_data=[
{"id": "sentry.rules.conditions.every_event.EveryEventCondition"},
],
)
# Workflows with assigned_to conditions
self.assigned_team1_rule, self.assigned_team1_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetType": "Team",
"targetIdentifier": self.team1.id,
},
],
)
)
self.assigned_team2_rule, self.assigned_team2_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetType": "Team",
"targetIdentifier": self.team2.id,
},
],
)
)
self.assigned_user1_rule, self.assigned_user1_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetType": "Member",
"targetIdentifier": self.user1.id,
},
],
)
)
self.assigned_unassigned_rule, self.assigned_unassigned_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetType": "Unassigned",
},
],
)
)
# Workflows with issue_category conditions
self.issue_category_cron_rule, self.issue_category_cron_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.issue_category.IssueCategoryFilter",
"value": "4", # GroupCategory.CRON
},
],
)
)
self.issue_category_error_rule, self.issue_category_error_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.issue_category.IssueCategoryFilter",
"value": "1", # GroupCategory.ERROR
},
],
)
)
self.issue_category_performance_rule, self.issue_category_performance_workflow = (
self._create_issue_rule_with_workflow(
project=self.project1,
condition_data=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"},
{
"id": "sentry.rules.filters.issue_category.IssueCategoryFilter",
"value": "2", # GroupCategory.PERFORMANCE
},
],
)
)
self.cron_detector1, self.monitor1 = self._create_cron_detector(
self.org, self.project1, "cron1", owner_team=self.team1
)
self.cron_detector2, self.monitor2 = self._create_cron_detector(
self.org, self.project1, "cron2", owner_user=self.user1
)
self.cron_detector3, self.monitor3 = self._create_cron_detector(
self.org, self.project2, "cron3"
)
self.cron_detector4, self.monitor4 = self._create_cron_detector(
self.org, self.project1, "cron4", owner_team=self.team2
)
def test_migration(self) -> None:
# Should be linked to workflows with compatible conditions
# Including assigned_to:team1 but NOT assigned_to:team2 or assigned_to:user1
# Including issue_category:CRON but NOT issue_category:ERROR or issue_category:PERFORMANCE
detector1_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector1)
detector1_workflow_ids = set(detector1_workflows.values_list("workflow_id", flat=True))
assert len(detector1_workflow_ids) == 7, (
f"detector1 should have 7 workflows, "
f"got {len(detector1_workflow_ids)}: {detector1_workflow_ids}"
)
has_duplicate = (
self.compatible_workflow1.id in detector1_workflow_ids
or self.compatible_workflow2.id in detector1_workflow_ids
)
assert has_duplicate, (
f"detector1 should have one of workflow1/workflow2 (deduped), "
f"got {detector1_workflow_ids}"
)
assert self.compatible_workflow3.id in detector1_workflow_ids, (
f"detector1 should have compatible_workflow3 (different conditions), "
f"got {detector1_workflow_ids}"
)
assert self.compatible_workflow4.id in detector1_workflow_ids, (
f"detector1 should have compatible_workflow4 (different actions), "
f"got {detector1_workflow_ids}"
)
assert self.compatible_workflow5.id in detector1_workflow_ids, (
f"detector1 should have compatible_workflow5 (different frequency), "
f"got {detector1_workflow_ids}"
)
assert self.compatible_workflow6.id in detector1_workflow_ids, (
f"detector1 should have compatible_workflow6 (age_comparison), "
f"got {detector1_workflow_ids}"
)
assert self.incompatible_workflow1.id not in detector1_workflow_ids, (
f"detector1 should not have incompatible_workflow1, " f"got {detector1_workflow_ids}"
)
assert self.assigned_team1_workflow.id in detector1_workflow_ids, (
f"detector1 (team1 owner) should have assigned_team1_workflow, "
f"got {detector1_workflow_ids}"
)
assert self.assigned_team2_workflow.id not in detector1_workflow_ids, (
f"detector1 (team1 owner) should not have assigned_team2_workflow, "
f"got {detector1_workflow_ids}"
)
assert self.assigned_user1_workflow.id not in detector1_workflow_ids, (
f"detector1 (team1 owner) should not have assigned_user1_workflow, "
f"got {detector1_workflow_ids}"
)
assert self.assigned_unassigned_workflow.id not in detector1_workflow_ids, (
f"detector1 (team1 owner) should not have assigned_unassigned_workflow, "
f"got {detector1_workflow_ids}"
)
assert self.mixed_workflow1.id not in detector1_workflow_ids, (
f"detector1 should not have mixed_workflow1 (has incompatible conditions), "
f"got {detector1_workflow_ids}"
)
# Test issue_category conditions
assert self.issue_category_cron_workflow.id in detector1_workflow_ids, (
f"detector1 should have issue_category_cron_workflow (category=CRON), "
f"got {detector1_workflow_ids}"
)
assert self.issue_category_error_workflow.id not in detector1_workflow_ids, (
f"detector1 should not have issue_category_error_workflow (category=ERROR), "
f"got {detector1_workflow_ids}"
)
assert self.issue_category_performance_workflow.id not in detector1_workflow_ids, (
f"detector1 should not have issue_category_performance_workflow (category=PERFORMANCE), "
f"got {detector1_workflow_ids}"
)
detector2_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector2)
detector2_workflow_ids = set(detector2_workflows.values_list("workflow_id", flat=True))
assert self.assigned_user1_workflow.id in detector2_workflow_ids, (
f"detector2 (user1 owner) should have assigned_user1_workflow, "
f"got {detector2_workflow_ids}"
)
assert self.assigned_team1_workflow.id not in detector2_workflow_ids, (
f"detector2 (user1 owner) should not have assigned_team1_workflow, "
f"got {detector2_workflow_ids}"
)
assert self.assigned_unassigned_workflow.id not in detector2_workflow_ids, (
f"detector2 (user1 owner) should not have assigned_unassigned_workflow, "
f"got {detector2_workflow_ids}"
)
# Test issue_category conditions for detector2
assert self.issue_category_cron_workflow.id in detector2_workflow_ids, (
f"detector2 should have issue_category_cron_workflow (category=CRON), "
f"got {detector2_workflow_ids}"
)
assert self.issue_category_error_workflow.id not in detector2_workflow_ids, (
f"detector2 should not have issue_category_error_workflow (category=ERROR), "
f"got {detector2_workflow_ids}"
)
assert self.issue_category_performance_workflow.id not in detector2_workflow_ids, (
f"detector2 should not have issue_category_performance_workflow (category=PERFORMANCE), "
f"got {detector2_workflow_ids}"
)
detector3_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector3)
detector3_workflow_ids = set(detector3_workflows.values_list("workflow_id", flat=True))
expected_detector3_workflows = {
self.project2_workflow1.id,
self.project2_unassigned_workflow.id,
}
assert detector3_workflow_ids == expected_detector3_workflows, (
f"detector3 (no owner) should have project2 workflows including unassigned, "
f"expected {expected_detector3_workflows}, got {detector3_workflow_ids}"
)
assert (
self.compatible_workflow1.id not in detector3_workflow_ids
), "detector3 should not be linked to project1 workflows"
assert (
self.compatible_workflow2.id not in detector3_workflow_ids
), "detector3 should not be linked to project1 workflows"
detector4_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector4)
detector4_workflow_ids = set(detector4_workflows.values_list("workflow_id", flat=True))
assert self.assigned_team2_workflow.id in detector4_workflow_ids, (
f"detector4 (team2 owner) should have assigned_team2_workflow, "
f"got {detector4_workflow_ids}"
)
assert self.assigned_team1_workflow.id not in detector4_workflow_ids, (
f"detector4 (team2 owner) should not have assigned_team1_workflow, "
f"got {detector4_workflow_ids}"
)
assert self.assigned_unassigned_workflow.id not in detector4_workflow_ids, (
f"detector4 (team2 owner) should not have assigned_unassigned_workflow, "
f"got {detector4_workflow_ids}"
)
| RelinkCronsToCompatibleIssueWorkflowsTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_magiclink.py | {
"start": 3848,
"end": 4940
} | class ____(util.MdCase):
"""Test cases for social link shortener."""
extension = [
'pymdownx.magiclink'
]
extension_configs = {
'pymdownx.magiclink': {
'social_url_shortener': True
}
}
def test_user(self):
"""Test user shortening."""
# Test #! original syntax
self.check_markdown(
r'https://x.com/someuser',
r'<p><a class="magiclink magiclink-x magiclink-mention" href="https://x.com/someuser" title="X User: someuser">@someuser</a></p>' # noqa: E501
)
def test_no_repo(self):
"""Test that repository shortening does not happen."""
self.check_markdown(
r'https://github.com/facelessuser',
r'<p><a href="https://github.com/facelessuser">https://github.com/facelessuser</a></p>'
)
def test_excluded(self):
"""Test excluded user."""
self.check_markdown(
r'https://x.com/home',
r'<p><a href="https://x.com/home">https://x.com/home</a></p>'
)
| TestMagicLinkShortnerSocial |
python | tiangolo__fastapi | tests/test_duplicate_models_openapi.py | {
"start": 120,
"end": 155
} | class ____(BaseModel):
pass
| Model |
python | django__django | tests/admin_views/tests.py | {
"start": 171209,
"end": 172265
} | class ____(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
def test_secure_view_shows_login_if_not_logged_in(self):
secure_url = reverse("secure_view")
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?next=%s" % (reverse("admin:login"), secure_url)
)
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, "admin/login.html")
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
Staff_member_required decorator works with an argument
(redirect_field_name).
"""
secure_url = "/test_admin/admin/secure-view2/"
response = self.client.get(secure_url)
self.assertRedirects(
response, "%s?myfield=%s" % (reverse("admin:login"), secure_url)
)
@override_settings(ROOT_URLCONF="admin_views.urls")
| SecureViewTests |
python | walkccc__LeetCode | solutions/2819. Minimum Relative Loss After Buying Chocolates/2819.py | {
"start": 0,
"end": 2337
} | class ____:
def minimumRelativeLosses(
self,
prices: list[int],
queries: list[list[int]],
) -> list[int]:
ans = []
prices.sort()
prefix = list(itertools.accumulate(prices, initial=0))
for k, m in queries:
countFront = self._getCountFront(k, m, prices)
countBack = m - countFront
ans.append(self._getRelativeLoss(countFront, countBack, k, prefix))
return ans
def _getCountFront(
self,
k: int,
m: int,
prices: list[int],
) -> int:
"""Returns `countFront` for query (k, m).
Returns `countFront` for query (k, m) s.t. picking the first `countFront`
and the last `m - countFront` chocolates is optimal.
Define loss[i] := the relative loss of picking `prices[i]`.
1. For prices[i] <= k, Bob pays prices[i] while Alice pays 0.
Thus, loss[i] = prices[i] - 0 = prices[i].
2. For prices[i] > k, Bob pays k while Alice pays prices[i] - k.
Thus, loss[i] = k - (prices[i] - k) = 2 * k - prices[i].
By observation, we deduce that it is always better to pick from the front
or the back since loss[i] is increasing for 1. and is decreasing for 2.
Assume that picking `left` chocolates from the left and `right = m - left`
chocolates from the right is optimal. Therefore, we are selecting
chocolates from `prices[0..left - 1]` and `prices[n - right..n - 1]`.
To determine the optimal `left` in each iteration, we simply compare
`loss[left]` with `loss[n - right]` if `loss[left] < loss[n - right]`,
it's worth increasing `left`.
"""
n = len(prices)
countNoGreaterThanK = bisect.bisect_right(prices, k)
l = 0
r = min(countNoGreaterThanK, m)
while l < r:
mid = (l + r) // 2
right = m - mid
# Picking prices[mid] is better than picking prices[n - right].
if prices[mid] < 2 * k - prices[n - right]:
l = mid + 1
else:
r = mid
return l
def _getRelativeLoss(
self,
countFront: int,
countBack: int,
k: int,
prefix: list[int],
) -> int:
"""
Returns the relative loss of picking `countFront` and `countBack`
chocolates.
"""
lossFront = prefix[countFront]
lossBack = 2 * k * countBack - (prefix[-1] - prefix[-countBack - 1])
return lossFront + lossBack
| Solution |
python | ray-project__ray | release/ray_release/job_manager/kuberay_job_manager.py | {
"start": 605,
"end": 7838
} | class ____:
def __init__(self):
self.cluster_startup_timeout = 600
self.job_id = None
self._kuberay_service_token = None
def run_and_wait(
self,
job_name: str,
image: str,
cmd_to_run: str,
timeout: int,
env_vars: Dict[str, Any],
working_dir: Optional[str] = None,
pip: Optional[List[str]] = None,
compute_config: Optional[Dict[str, Any]] = None,
autoscaler_config: Optional[Dict[str, Any]] = None,
) -> Tuple[int, float]:
self.job_name = job_name
self._run_job(
job_name,
image,
cmd_to_run,
env_vars,
working_dir,
pip,
compute_config,
autoscaler_config,
)
return self._wait_job(timeout)
def _run_job(
self,
job_name: str,
image: str,
cmd_to_run: str,
env_vars: Dict[str, Any],
working_dir: Optional[str] = None,
pip: Optional[List[str]] = None,
compute_config: Optional[Dict[str, Any]] = None,
autoscaler_config: Optional[Dict[str, Any]] = None,
) -> None:
logger.info(f"Executing {cmd_to_run} with {env_vars} via RayJob CRD")
request = {
"namespace": DEFAULT_KUBERAY_NAMESPACE,
"name": job_name,
"entrypoint": cmd_to_run,
"ray_image": image,
"compute_config": compute_config,
"runtime_env": {
"env_vars": env_vars,
"pip": pip or [],
"working_dir": working_dir,
},
"autoscaler_config": autoscaler_config,
}
url = f"{KUBERAY_SERVER_URL}/api/v1/jobs"
token = self._get_kuberay_server_token()
if not token:
raise Exception("Failed to get KubeRay service token")
headers = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json",
}
logger.info(f"Submitting KubeRay job request: {request}")
response = requests.post(url, json=request, headers=headers)
response.raise_for_status()
def _wait_job(self, timeout_sec: int = 7200) -> Tuple[int, float]:
"""
Wait for the job to start and enter a terminal state.
If the job does not start within the timeout, terminate it and raise an error.
If the job enters a terminal state, return the return code and the duration.
Args:
timeout: The timeout for the job to start and enter a terminal state.
Returns:
Tuple[int, float]: The return code and the duration.
"""
start_timestamp = time.time()
next_status_timestamp = start_timestamp + JOB_STATUS_CHECK_INTERVAL
deadline_timestamp = start_timestamp + self.cluster_startup_timeout
job_running = False
while True:
now = time.time()
if now >= deadline_timestamp:
self._terminate_job()
if not job_running:
raise JobStartupTimeout(
"Cluster did not start within "
f"{self.cluster_startup_timeout} seconds."
)
raise CommandTimeout(f"Job timed out after {timeout_sec} seconds")
if now >= next_status_timestamp:
if job_running:
logger.info(
f"... job still running ... ({int(now - start_timestamp)} seconds, {int(deadline_timestamp - now)} seconds to timeout)"
)
else:
logger.info(
f"... job not yet running ... ({int(now - start_timestamp)} seconds, {int(deadline_timestamp - now)} seconds to timeout)"
)
next_status_timestamp += JOB_STATUS_CHECK_INTERVAL
status = self._get_job_status()
logger.info(f"Current job status: {status}")
if not job_running and status in ["RUNNING", "ERRORED"]:
logger.info("Job started")
job_running = True
deadline_timestamp = now + timeout_sec
if status in ["SUCCEEDED", "FAILED", "ERRORED", "CANCELLED"]:
logger.info(f"Job entered terminal state {status}")
duration = time.time() - start_timestamp
retcode = job_status_to_return_code[status]
break
time.sleep(JOB_STATUS_CHECK_INTERVAL)
duration = time.time() - start_timestamp
return retcode, duration
def _get_job(self) -> Dict[str, Any]:
url = f"{KUBERAY_SERVER_URL}/api/v1/jobs?namespace={DEFAULT_KUBERAY_NAMESPACE}&names={self.job_name}"
token = self._get_kuberay_server_token()
if not token:
raise Exception("Failed to get KubeRay service token")
headers = {
"Authorization": "Bearer " + token,
}
response = requests.get(url, headers=headers)
response.raise_for_status()
response_json = response.json()
if "jobs" not in response_json or len(response_json["jobs"]) == 0:
raise Exception(f"No jobs found for {self.job_name}")
if len(response_json["jobs"]) > 1:
raise Exception(f"Multiple jobs found for {self.job_name}")
return response_json["jobs"][0]
def _get_job_id(self) -> str:
job = self._get_job()
if job.get("id"):
self.job_id = job["id"]
return self.job_id
else:
raise Exception(f"Job {self.job_name} does not have an ID")
def _get_job_status(self) -> str:
job = self._get_job()
return job["status"]
def _get_kuberay_server_token(self) -> Optional[str]:
# Use cached token if available
if self._kuberay_service_token:
return self._kuberay_service_token
session = boto3.session.Session()
client = session.client("secretsmanager", region_name="us-west-2")
try:
secret_response = client.get_secret_value(
SecretId=KUBERAY_SERVICE_SECRET_KEY_SECRET_NAME
)
kuberay_service_secret_key = secret_response["SecretString"]
except (boto3.exceptions.Boto3Error, botocore.exceptions.ClientError) as e:
logger.error(
f"Failed to get KubeRay service token from AWS Secrets Manager: {e}"
)
return None
except Exception as e:
logger.error(f"Failed to get KubeRay service token: {e}")
return None
login_url = f"{KUBERAY_SERVER_URL}/api/v1/login"
login_request = {"secret_key": kuberay_service_secret_key}
login_response = requests.post(login_url, json=login_request)
login_response.raise_for_status()
# Cache the token as instance variable
self._kuberay_service_token = login_response.json()["token"]
return self._kuberay_service_token
def fetch_results(self) -> None:
# TODO: implement this
pass
def _terminate_job(self) -> None:
# TODO: implement this
pass
| KubeRayJobManager |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 548031,
"end": 548443
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("CreatedIssueContribution", graphql_name="node")
"""The item at the end of the edge."""
| CreatedIssueContributionEdge |
python | pytorch__pytorch | test/dynamo/test_modes.py | {
"start": 3960,
"end": 24181
} | class ____(torch._dynamo.test_case.TestCase):
@classmethod
def setUpClass(cls):
cls.default_device_old = torch.get_default_device()
super().setUpClass()
@classmethod
def tearDownClass(cls):
torch.set_default_device(cls.default_device_old)
super().tearDownClass()
def setUp(self):
torch.set_default_device(None)
torch._dynamo.reset()
def tearDown(self):
torch.set_default_device(None)
torch._dynamo.reset()
def _run_torch_function_mode_guard_test(self):
class TestMode1(BaseTorchFunctionMode):
pass
class TestMode2(BaseTorchFunctionMode):
pass
cnt = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnt.__call__)
def fn(x):
return x + 1
inp = torch.ones(2, 2)
fn(inp)
self.assertEqual(cnt.frame_count, 1)
with TestMode1():
fn(inp)
self.assertEqual(cnt.frame_count, 2)
with TestMode1(), TestMode2():
fn(inp)
self.assertEqual(cnt.frame_count, 3)
with TestMode2(), TestMode1():
fn(inp)
self.assertEqual(cnt.frame_count, 4)
with TestMode1():
fn(inp)
self.assertEqual(cnt.frame_count, 4)
@torch._dynamo.config.patch("enable_cpp_guard_manager", False)
def test_torch_function_mode_guards_py(self):
self._run_torch_function_mode_guard_test()
def test_torch_function_mode_guards_cpp(self):
self._run_torch_function_mode_guard_test()
@requires_gpu
def test_torch_function_mode_preserves_cuda_rng_state(self):
class ConstantReturnMode(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
return -42
@torch._dynamo.optimize("eager")
def fn():
with ConstantReturnMode():
return 123
self.assertEqual(fn(), 123)
def test_stack_state_mutation_default_device(self):
m = BaseTorchFunctionMode()
m1 = BaseTorchFunctionMode()
with m, m1:
@torch.compile(fullgraph=True)
def fn(x):
torch.set_default_device("cpu")
_pop_torch_function_stack()
fn(torch.ones(2, 2))
_push_on_torch_function_stack(m1)
stack = _get_current_function_mode_stack()
self.assertIsInstance(stack[0], DeviceContext)
self.assertEqual(stack[0].device, torch.device("cpu"))
self.assertIs(stack[1], m)
self.assertIs(stack[2], m1)
def test_stack_state_clear_default_device(self):
@torch.compile(fullgraph=True)
def fn(x):
torch.set_default_device(None)
return x + 1
fn(torch.ones(2, 2))
stack = _get_current_function_mode_stack()
self.assertEqual(len(stack), 0)
m = BaseTorchFunctionMode()
m1 = BaseTorchFunctionMode()
# Stack populated, add device
with m, m1:
@torch.compile(fullgraph=True)
def fn(x):
torch.set_default_device("cpu")
torch.set_default_device(None)
torch.set_default_device("cpu")
return x + 1
fn(torch.ones(2, 2))
stack = _get_current_function_mode_stack()
self.assertEqual(stack[0].device, torch.device("cpu"))
self.assertIs(stack[1], m)
self.assertIs(stack[2], m1)
# Stack populated, remove device
torch.set_default_device("cpu")
with m, m1:
@torch.compile(fullgraph=True)
def fn(x):
torch.set_default_device(None)
return x + 1
fn(torch.ones(2, 2))
stack = _get_current_function_mode_stack()
self.assertIs(stack[0], m)
self.assertIs(stack[1], m1)
@torch.compile(fullgraph=True)
def fn(x):
torch.set_default_device("cpu")
torch.set_default_device("cpu")
return x + 1
fn(torch.ones(2, 2))
stack = _get_current_function_mode_stack()
self.assertEqual(stack[0].device, torch.device("cpu"))
torch.set_default_device(None)
def test_pop_torch_function_mode(self):
m = BaseTorchFunctionMode()
with m:
@torch.compile(fullgraph=True)
def fn(x):
_pop_torch_function_stack()
return x + 1
fn(torch.ones(2, 2))
self.assertEqual(_len_torch_function_stack(), 0)
# reset stack so __exit__ doesn't crash
_push_on_torch_function_stack(m)
self.assertEqual(_len_torch_function_stack(), 0)
def test_is_torch_function_all_disabled(self):
@torch.compile(fullgraph=True)
def fn(x):
return (
torch._C._is_torch_function_all_disabled(),
torch.add(x, 1.0),
)
input = torch.ones(2, 2)
res, _ = fn(input)
self.assertFalse(res)
def test_error_empty_stack_pop_torch_function_mode(self):
@torch.compile(fullgraph=True)
def fn(x):
_pop_torch_function_stack()
return x + 1
self.assertRaisesRegex(
torch._dynamo.exc.Unsupported,
"Attempted to pop from empty torch function mode stack",
lambda: fn(torch.ones(2, 2)),
)
def test_push_torch_function_mode(self):
m = BaseTorchFunctionMode()
with m:
@torch.compile(fullgraph=True)
def fn(x, m):
_push_on_torch_function_stack(m)
return x + 1
fn(torch.ones(2, 2), m)
self.assertEqual(_len_torch_function_stack(), 2)
# reset stack state
_pop_torch_function_stack()
self.assertEqual(_len_torch_function_stack(), 0)
def test_len_torch_function_mode(self):
m = BaseTorchFunctionMode()
with m:
@torch.compile(fullgraph=True)
def fn(x):
z = _len_torch_function_stack()
return x + z
res = fn(torch.ones(2, 2))
self.assertEqual(res, torch.ones(2, 2) + 1)
self.assertEqual(_len_torch_function_stack(), 1)
def test_intermedate_torch_function_mode_construction_mutation(self):
class TestMode(BaseTorchFunctionMode):
def __init__(self, x):
self.x = x
@torch.compile(fullgraph=True)
def fn(x):
z = TestMode(2)
z.y = 2
return x + 1, z
fn(torch.ones(2, 2))
def test_torch_function_mode_enabled_guard(self):
cnt = torch._dynamo.testing.CompileCounter()
inp = torch.ones(2, 2)
@torch.compile(backend=cnt.__call__)
def fn(x):
return x + 1
with BaseTorchFunctionMode(), torch._C.DisableTorchFunctionSubclass():
with torch._C.DisableTorchFunction():
fn(inp)
fn(inp)
self.assertEqual(cnt.frame_count, 2)
def test_nested_torch_function_mode(self):
mode_1_called = False
mode_2_called = False
def reset_state():
nonlocal mode_1_called
nonlocal mode_2_called
mode_1_called = False
mode_2_called = False
ones = torch.ones(2, 2)
zeros = torch.zeros(2, 2)
class TestMode1(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
nonlocal mode_1_called
mode_1_called = True
if func == torch.add:
return zeros
return super().__torch_function__(func, types, args, kwargs)
class TestMode2(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
nonlocal mode_2_called
mode_2_called = True
if func == torch.mul:
return ones
return super().__torch_function__(func, types, args, kwargs)
def fn(x):
return torch.add(x, 3)
def fn_2(x):
return torch.mul(x, 3) + torch.add(x, 3)
inp = torch.ones(2, 2) + 1
for fn_i in [fn, fn_2]:
fn_opt = torch.compile(fn_i, fullgraph=True)
with TestMode1(), TestMode2():
expected = fn_i(inp), mode_1_called, mode_2_called
reset_state()
actual = fn_opt(inp), mode_1_called, mode_2_called
reset_state()
self.assertEqual(expected, actual)
def test_torch_function_mode_disable(self):
class TestSubclass(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
if func == torch.add:
return torch.ones(2, 2)
return super().__torch_function__(func, types, args, kwargs)
class TestMode(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
if func == torch.add:
return torch.zeros(2, 2)
return super().__torch_function__(func, types, args, kwargs)
def fn(x):
return torch.add(x, 3)
inp = (torch.ones(2, 2) + 1).as_subclass(TestSubclass)
fn_opt = torch.compile(fn, fullgraph=True)
with TestMode():
with torch._C.DisableTorchFunctionSubclass():
expected = fn(inp)
actual = fn_opt(inp)
self.assertEqual(expected, actual)
with torch._C.DisableTorchFunction():
expected = fn(inp)
actual = fn_opt(inp)
self.assertEqual(expected, actual)
def test_torch_function_mode_highest_priority(self):
class TestSubclass(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
if func == torch.add:
return torch.ones(2, 2)
return super().__torch_function__(func, types, args, kwargs)
def fn(x):
return torch.add(x, 3)
inp = (torch.ones(2, 2) + 1).as_subclass(TestSubclass)
fn_opt = torch.compile(fn, fullgraph=True)
with TestMode():
expected = fn(inp)
actual = fn_opt(inp)
self.assertEqual(expected, actual)
def test_torch_function_mode_enter_exit(self):
def fn(x, y):
with TestMode():
o = torch.add(x, 3)
return torch.add(o, y)
inp = (torch.ones(2, 2) + 1, torch.ones(2, 2) + 2)
fn_opt = torch.compile(fn, fullgraph=True)
expected = fn(*inp)
actual = fn_opt(*inp)
self.assertEqual(expected, actual)
def test_torch_function_mode_graph_break(self):
def fn(x, y):
with TestMode():
torch._dynamo.graph_break()
o = torch.add(x, 3)
return torch.add(o, y)
inp = (torch.ones(2, 2) + 1, torch.ones(2, 2) + 2)
fn_opt = torch.compile(fn)
expected = fn(*inp)
actual = fn_opt(*inp)
self.assertEqual(expected, actual)
def test_torch_function_mode_and_pop_graph_break(self):
def fn(x, y):
with TestMode():
z = _pop_torch_function_stack()
torch._dynamo.graph_break()
_push_on_torch_function_stack(z)
o = torch.add(x, 3)
return torch.add(o, y)
inp = (torch.ones(2, 2) + 1, torch.ones(2, 2) + 2)
fn_opt = torch.compile(fn)
expected = fn(*inp)
actual = fn_opt(*inp)
self.assertEqual(expected, actual)
def test_torch_function_mode_restore_on_exc(self):
@torch._dynamo.disable()
def err():
raise RuntimeError("test")
@torch.compile()
def fn(x):
with TestMode():
x += 1
err()
x += 2
return x
try:
fn(torch.ones(2, 2))
except RuntimeError:
pass
self.assertEqual(_len_torch_function_stack(), 0)
def test_torch_function_mode_and_pop_graph_break_mutation(self):
def fn(x, y):
with TestMode():
z = _pop_torch_function_stack()
z.y = 5
torch._dynamo.graph_break()
_push_on_torch_function_stack(z)
o = torch.add(x, 3)
o = torch.mul(o, z.y)
return torch.add(o, y)
inp = (torch.ones(2, 2) + 1, torch.ones(2, 2) + 2)
fn_opt = torch.compile(fn)
expected = fn(*inp)
actual = fn_opt(*inp)
self.assertEqual(expected, actual)
# Needs larger cache size since we recompile for each op
@patch.object(torch._dynamo.config, "recompile_limit", 48)
def test_builtin_equivalent_funcs(self):
from torch._dynamo.variables.builtin import (
BUILTIN_TO_TENSOR_FN_MAP,
BUILTIN_TO_TENSOR_RFN_MAP,
)
from torch._dynamo.variables.torch_function import (
bin_int_ops,
bin_ops,
tensor_and_int_ops,
un_int_ops,
un_ops,
)
expected_func = None
valid = False
class FuncEquivMode(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal expected_func
nonlocal valid
if not kwargs:
kwargs = {}
if torch._dynamo.is_compiling():
valid = expected_func == func
return super().__torch_function__(func, types, args, kwargs)
inp0 = torch.ones(1, 1)
inp1 = torch.ones(1, 1)
inp0_int = torch.ones(1, 1, dtype=torch.int32)
inp1_int = torch.ones(1, 1, dtype=torch.int32)
@torch.compile(fullgraph=True)
def fn_un(op, inp):
return op(inp)
@torch.compile(fullgraph=True)
def fn_un_int(op, inp):
return op(inp)
@torch.compile(fullgraph=True)
def fn_bin(op, inp0, inp1):
return op(inp0, inp1)
@torch.compile(fullgraph=True)
def fn_bin_int(op, inp0, inp1):
return op(inp0, inp1)
@torch.compile(fullgraph=True)
def fn_tensor_and_int(op, inp0, inp1):
return op(inp0, inp1)
setups_and_oplists = [
(lambda o: fn_un(o, inp0), un_ops),
(lambda o: fn_un_int(o, inp0_int), un_int_ops),
(lambda o: fn_bin(o, inp0, inp1), bin_ops),
(lambda o: fn_bin_int(o, inp0_int, inp1_int), bin_int_ops),
(lambda o: fn_tensor_and_int(o, inp0_int, 0), tensor_and_int_ops),
]
# gather the reverse functions
rsetups_and_oplists = [
(
lambda o: fn_bin(o, 1, inp1),
bin_ops,
), # Get r* ops, (ex. __sub__(int, Tensor) -> __rsub__(Tensor, int))
(lambda o: fn_bin_int(o, 1, inp1_int), bin_int_ops),
(lambda o: fn_tensor_and_int(o, 0, inp0_int), tensor_and_int_ops),
]
skips = {operator.not_} # Has local scalar dense call which graph breaks
rskips = {
operator.matmul,
operator.imatmul,
operator.getitem,
} # Doesn't type check with reversed args
def run_checks(setups_and_oplists, skips, ref_map):
nonlocal valid
nonlocal expected_func
for setup_fn, op_list in setups_and_oplists:
for op in op_list:
if op in skips or op not in ref_map:
continue
with FuncEquivMode():
expected_func = ref_map[op]
setup_fn(op)
self.assertTrue(valid)
expected_func = None
valid = False
run_checks(setups_and_oplists, skips, BUILTIN_TO_TENSOR_FN_MAP)
run_checks(rsetups_and_oplists, rskips, BUILTIN_TO_TENSOR_RFN_MAP)
def test_expand(self):
from torch.distributions import (
AffineTransform,
ComposeTransform,
Normal,
TanhTransform,
TransformedDistribution,
)
# https://github.com/pytorch/pytorch/issues/141232
with torch.device("cpu"):
@torch.compile(fullgraph=True)
def func(a):
d = TransformedDistribution(
Normal(a, 1),
ComposeTransform([TanhTransform(), AffineTransform(2, 2)]),
)
b = d.log_prob(d.rsample((10,)))
return b
func(torch.randn(3))
@requires_gpu
def test_flex_attention(self):
import torch
from torch.nn.attention.flex_attention import create_block_mask, flex_attention
torch.set_default_device(device_type)
flex_attention = torch.compile(flex_attention, dynamic=False)
prefix_lengths = torch.arange(8)
def prefix_lm(b, h, q, kv):
return prefix_lengths[b] >= kv
# This runs in fullgraph already
create_block_mask(
prefix_lm, 8, None, 512, 512, _compile=True, device=device_type
)
def test_register_hook(self):
import functools
def my_hook(grad, *, k=0):
return grad + k
hook = functools.partial(my_hook, k=3)
class MyMod(torch.nn.Module):
def forward(self, x):
x.register_hook(hook)
y = x.mul(2)
z = y.mul(3)
return (z,)
mod = MyMod()
x = torch.ones(4, requires_grad=True)
with torch.device("cpu"):
torch.compile(mod, fullgraph=True)(x)
@requires_gpu
@skipIfXpu(msg="XPU does not support flex attention")
def test_hop(self):
import torch
import torch._higher_order_ops
from torch.nn.attention.flex_attention import (
flex_attention as flex_attention_eager,
)
with torch.device(GPU_TYPE):
flex_attention = torch.compile(flex_attention_eager, dynamic=False)
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported,
"raised exception HopDetectionError([ConstantVariable(str: 'test')])",
):
# This runs in fullgraph already
with TestModeRaises():
flex_attention(
torch.ones(2, 2, 2, 2),
torch.ones(2, 2, 2, 2),
torch.ones(2, 2, 2, 2),
)
@requires_gpu
@skipIfXpu(msg="XPU does not support flex attention")
def test_hop_eager(self):
import torch
import torch._higher_order_ops
from torch.nn.attention.flex_attention import (
flex_attention as flex_attention_eager,
)
with torch.device(GPU_TYPE):
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported,
"raised exception HopDetectionError([ConstantVariable(str: 'test')])",
):
with TestModeRaises():
flex_attention_eager(
torch.ones(2, 2, 2, 2),
torch.ones(2, 2, 2, 2),
torch.ones(2, 2, 2, 2),
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TorchFunctionModeTests |
python | django__django | django/db/models/aggregates.py | {
"start": 666,
"end": 1358
} | class ____(Func):
arity = 1
template = " FILTER (WHERE %(expressions)s)"
def as_sql(self, compiler, connection, **extra_context):
if not connection.features.supports_aggregate_filter_clause:
raise NotSupportedError(
"Aggregate filter clauses are not supported on this database backend."
)
try:
return super().as_sql(compiler, connection, **extra_context)
except FullResultSet:
return "", ()
@property
def condition(self):
return self.source_expressions[0]
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
| AggregateFilter |
python | realpython__materials | arcade-platformer/arcade_platformer/arcade_platformer.py | {
"start": 7462,
"end": 9435
} | class ____(arcade.View):
"""Shown when a level is completed"""
def __init__(
self, game_view: arcade.View, victory_sound: arcade.Sound
) -> None:
super().__init__()
# Store a reference to the underlying view
self.game_view = game_view
# Play the victory sound
arcade.play_sound(victory_sound)
# Store a semi-transparent color to use as an overlay
self.fill_color = arcade.make_transparent_color(
arcade.color.WHITE, transparency=150
)
def on_draw(self) -> None:
"""Draw the underlying screen, blurred, then the victory text"""
# First, draw the underlying view
# This also calls start_render(), so no need to do it again
self.game_view.on_draw()
# Now create a filled rect that covers the current viewport
# We get the viewport size from the game view
arcade.draw_lrtb_rectangle_filled(
left=self.game_view.view_left,
right=self.game_view.view_left + game.SCREEN_WIDTH,
top=self.game_view.view_bottom + game.SCREEN_HEIGHT,
bottom=self.game_view.view_bottom,
color=self.fill_color,
)
# Now show the victory text
arcade.draw_text(
"SUCCESS! Press Enter for next level...",
start_x=self.game_view.view_left + 90,
start_y=self.game_view.view_bottom + 300,
color=arcade.color.INDIGO,
font_size=40,
)
def on_key_press(self, key: int, modifiers: int) -> None:
"""Start the next level when the user presses Enter
Arguments:
key -- Which key was pressed
modifiers -- What modifiers were active
"""
if key == arcade.key.ENTER:
self.game_view.level += 1
self.game_view.setup()
self.window.show_view(self.game_view)
# Game Over View, shown when the game is over
| VictoryView |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 24467,
"end": 25990
} | class ____:
def __init__(self, resource_group: Any) -> None:
self._name = resource_group.name
self._capacity = resource_group.capacity
self._num_available_node = resource_group.num_available_node
self._num_loaded_replica = resource_group.num_loaded_replica
self._num_outgoing_node = resource_group.num_outgoing_node
self._num_incoming_node = resource_group.num_incoming_node
self._config = resource_group.config
self._nodes = [NodeInfo(node) for node in resource_group.nodes]
def __repr__(self) -> str:
return f"""ResourceGroupInfo:
<name:{self.name}>,
<capacity:{self.capacity}>,
<num_available_node:{self.num_available_node}>,
<num_loaded_replica:{self.num_loaded_replica}>,
<num_outgoing_node:{self.num_outgoing_node}>,
<num_incoming_node:{self.num_incoming_node}>,
<config:{self.config}>,
<nodes:{self.nodes}>"""
@property
def name(self):
return self._name
@property
def capacity(self):
return self._capacity
@property
def num_available_node(self):
return self._num_available_node
@property
def num_loaded_replica(self):
return self._num_loaded_replica
@property
def num_outgoing_node(self):
return self._num_outgoing_node
@property
def num_incoming_node(self):
return self._num_incoming_node
@property
def config(self):
return self._config
@property
def nodes(self):
return self._nodes
| ResourceGroupInfo |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 246438,
"end": 265482
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
_FORWARD_COMPATIBILITY_HORIZONS = [
(2020, 1, 1),
(2020, 7, 14),
(2525, 1, 1), # future behavior
]
def testBmpChannels(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with test_util.use_gpu():
base = "tensorflow/core/lib/bmp/testdata"
# `rgba_transparent.bmp` has 4 channels with transparent pixels.
# Test consistency between `decode_image` and `decode_bmp` functions.
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
image0 = image_ops.decode_image(bmp0, channels=4)
image1 = image_ops.decode_bmp(bmp0, channels=4)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 4 channels.
# Note that this operation simply drops 4th channel information. This
# is the same behavior as `decode_png`.
# e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].
bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
image2 = image_ops.decode_bmp(bmp0, channels=3)
image3 = image_ops.decode_bmp(bmp1)
image2, image3 = self.evaluate([image2, image3])
self.assertAllEqual(image2, image3)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 3 channels. Alpha channel should be set to
# UINT8_MAX.
bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
image4 = image_ops.decode_bmp(bmp3, channels=4)
image5 = image_ops.decode_bmp(bmp4)
image4, image5 = self.evaluate([image4, image5])
self.assertAllEqual(image4, image5)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 1 channel (grayscale).
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
bmp7 = io_ops.read_file(
os.path.join(base, "grayscale_small_3channels.bmp"))
image6 = image_ops.decode_bmp(bmp6, channels=3)
image7 = image_ops.decode_bmp(bmp7)
image6, image7 = self.evaluate([image6, image7])
self.assertAllEqual(image6, image7)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 1 channel (grayscale). Alpha channel should be
# set to UINT8_MAX.
bmp9 = io_ops.read_file(
os.path.join(base, "grayscale_small_4channels.bmp"))
image8 = image_ops.decode_bmp(bmp6, channels=4)
image9 = image_ops.decode_bmp(bmp9)
image8, image9 = self.evaluate([image8, image9])
self.assertAllEqual(image8, image9)
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# NumPy conversions should happen before
x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
x_str = image_ops_impl.encode_png(x)
x_dec = image_ops_impl.decode_image(
x_str, channels=3, dtype=dtypes.uint16)
self.assertAllEqual(x, x_dec)
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
# Test `expand_animations=False` case.
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
# image_ops.decode_png() handles GIFs and returns 3D tensors
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertLen(image0.shape, 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
# Test `expand_animations=True` case.
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
image2, image3 = self.evaluate([image2, image3])
self.assertLen(image2.shape, 4)
self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
self.assertAllEqual(image2, image3)
def testImageCropAndResize(self):
if test_util.is_gpu_available():
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
else:
message = "Boxes contains at least one element that is not finite"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
message):
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
def testImageCropAndResizeWithInvalidInput(self):
with self.session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = image_ops_impl.crop_and_resize_v2(
image=np.ones((1, 1, 1, 1)),
boxes=np.ones((11, 4)),
box_indices=np.ones((11)),
crop_size=[2065374891, 1145309325])
self.evaluate(op)
def testImageCropAndResizeWithNon1DBoxes(self):
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
"must be rank 1"):
op = image_ops_impl.crop_and_resize_v2(
image=np.ones((2, 2, 2, 2)),
boxes=np.ones((0, 4)),
box_indices=np.ones((0, 1)),
crop_size=[1, 1])
self.evaluate(op)
@parameterized.named_parameters(
("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
)
def testWrongOpBmp(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_bmp(img_bytes)
self.evaluate(img)
@parameterized.named_parameters(
("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
("_png", image_ops.decode_png, "DecodePng"),
("_gif", image_ops.decode_gif, "DecodeGif"),
)
def testWrongOp(self, decode_op, op_used):
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
"or `decode_image` instead. Op used: ") + op_used
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img = decode_op(bmp0)
self.evaluate(img)
@parameterized.named_parameters(
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
("_bmp", "BMP", "rgba_small.bmp"),
)
def testWrongOpJpeg(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
"detected ") + img_format
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
self.evaluate(img)
def testGifFramesWithDiffSize(self):
"""Test decoding an animated GIF.
This test verifies that `decode_image` op can decode animated GIFs whose
first frame does not fill the canvas. The unoccupied areas should be filled
with zeros (black).
`squares.gif` is animated with two images of different sizes. It
alternates between a smaller image of size 10 x 10 and a larger image of
size 16 x 16. Because it starts animating with the smaller image, the first
frame does not fill the canvas. (Canvas size is equal to max frame width x
max frame height.)
`red_black.gif` has just a single image in a GIF format. It is the same
image as the smaller image (size 10 x 10) of the two images in
`squares.gif`. The only difference is that its background (canvas - smaller
image) is pre-filled with zeros (black); it is the groundtruth.
"""
base = "tensorflow/core/lib/gif/testdata"
gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
expand_animations=False)
gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
image1_0 = array_ops.gather(image1, 0)
image0, image1_0 = self.evaluate([image0, image1_0])
self.assertAllEqual(image0, image1_0)
def testDecodeImageShapeInferenceInDataPipeline(self):
"""Test that decode_image sets proper shape inference in tf.data pipelines.
This test verifies the fix for the issue where tf.image.decode_image
followed by tf.image.resize would fail with "ValueError: 'images' contains
no shape" when used in tf.data.Dataset.map() operations.
The fix ensures that when expand_animations=False, the output tensor shape
is properly set to [None, None, channels] for known channel counts or
[None, None, None] for unknown channel counts, enabling proper shape
inference for subsequent operations like resize.
"""
# Create 2x2 RGB test image.
test_image = constant_op.constant(
[[[255, 0, 0], [0, 255, 0]], [[0, 0, 255], [255, 255, 0]]],
dtype=dtypes.uint8,
)
jpeg_bytes = gen_image_ops.encode_jpeg(test_image)
def process_image_fixed(image_bytes):
"""Process function using expand_animations=False for shape inference."""
decoded = image_ops.decode_image(
image_bytes, channels=3, expand_animations=False
)
resized = image_ops.resize_images(decoded, [224, 224])
return resized
with self.cached_session():
# Test tf.data pipeline with decode_image + resize.
dataset_fixed = dataset_ops.Dataset.from_tensor_slices([jpeg_bytes])
dataset_fixed = dataset_fixed.map(process_image_fixed)
# Use get_single_element for graph mode compatibility.
processed_image = get_single_element.get_single_element(dataset_fixed)
processed_image_val = self.evaluate(processed_image)
self.assertEqual(processed_image_val.shape, (224, 224, 3))
# Verify shape inference with expand_animations=False.
decoded_fixed = image_ops.decode_image(
jpeg_bytes, channels=3, expand_animations=False
)
self.assertEqual(decoded_fixed.shape.rank, 3)
# Check shape compatibility in both graph and eager modes.
shape_list = decoded_fixed.get_shape().as_list()
self.assertEqual(shape_list[2], 3)
self.assertTrue(shape_list[0] is None or shape_list[0] == 2)
self.assertTrue(shape_list[1] is None or shape_list[1] == 2)
# Test different channel configurations.
# Note: decode_image only supports channels 0, 1, 3, 4 (not 2).
for channels in [0, 1, 3, 4]:
if channels == 0:
# Use auto-detection with RGB JPEG.
test_bytes = jpeg_bytes
elif channels == 1:
# Create grayscale test image.
gray_image = constant_op.constant(
[[128, 64], [192, 32]], dtype=dtypes.uint8
)
gray_image = array_ops.expand_dims(gray_image, -1)
test_bytes = gen_image_ops.encode_png(gray_image)
elif channels == 4:
# Create RGBA test image using PNG (JPEG doesn't support 4 channels).
rgba_image = constant_op.constant(
[
[[255, 0, 0, 255], [0, 255, 0, 128]],
[[0, 0, 255, 64], [255, 255, 0, 192]],
],
dtype=dtypes.uint8,
)
test_bytes = gen_image_ops.encode_png(rgba_image)
else:
# Use RGB JPEG for 3-channel tests.
test_bytes = jpeg_bytes
decoded = image_ops.decode_image(
test_bytes, channels=channels, expand_animations=False
)
self.assertEqual(decoded.shape.rank, 3)
if channels == 0:
# Auto-detection case - shape depends on the image format used.
expected_shape = [None, None, None]
elif channels <= 4:
expected_shape = [None, None, channels]
else:
expected_shape = [None, None, None]
# Shape must be compatible for resize operations.
self.assertTrue(decoded.shape.is_compatible_with(expected_shape))
# Test automatic channel detection.
decoded_unknown = image_ops.decode_image(
jpeg_bytes, expand_animations=False
)
self.assertEqual(decoded_unknown.shape.rank, 3)
# Check shape compatibility with automatic channel detection.
shape_list_unknown = decoded_unknown.get_shape().as_list()
self.assertTrue(
shape_list_unknown[0] is None or shape_list_unknown[0] == 2
)
self.assertTrue(
shape_list_unknown[1] is None or shape_list_unknown[1] == 2
)
self.assertTrue(
shape_list_unknown[2] is None or shape_list_unknown[2] == 3
)
if __name__ == "__main__":
googletest.main()
| DecodeImageTest |
python | jazzband__django-oauth-toolkit | oauth2_provider/exceptions.py | {
"start": 1418,
"end": 1560
} | class ____(InvalidRequestFatalError):
description = "The ID Token is expired, revoked, malformed, or otherwise invalid."
| InvalidIDTokenError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 361135,
"end": 362203
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"edges",
"filtered_count",
"nodes",
"page_count",
"page_info",
"total_count",
"updated_at",
)
edges = sgqlc.types.Field(
sgqlc.types.list_of("IssueTimelineItemsEdge"), graphql_name="edges"
)
filtered_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="filteredCount"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("IssueTimelineItems"), graphql_name="nodes"
)
page_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="pageCount")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
| IssueTimelineItemsConnection |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 158935,
"end": 162646
} | class ____(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
rvs(dim=None, size=1, random_state=None)
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices.
seed : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None`, the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) # may vary
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
Alternatively, the object may be called (as a function) to fix the `dim`
parameter, return a "frozen" unitary_group random variable:
>>> rv = unitary_group(5)
See Also
--------
ortho_group
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen (U(N)) n-dimensional unitary matrix distribution.
See `unitary_group_frozen` for more information.
"""
return unitary_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim < 0 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar nonnegative integer.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
dim = self._process_parameters(dim)
size = (size,) if size > 1 else ()
z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) +
1j*random_state.normal(size=size + (dim, dim)))
q, r = np.linalg.qr(z)
# The last two dimensions are the rows and columns of R matrices.
# Extract the diagonals. Note that this eliminates a dimension.
d = r.diagonal(offset=0, axis1=-2, axis2=-1)
# Add back a dimension for proper broadcasting: we're dividing
# each row of each R matrix by the diagonal of the R matrix.
q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly
return q
unitary_group = unitary_group_gen()
| unitary_group_gen |
python | Textualize__textual | src/textual/_parser.py | {
"start": 387,
"end": 523
} | class ____(NamedTuple):
"""Reads a single character."""
timeout: float | None = None
"""Optional timeout in seconds."""
| Read1 |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_match_thai.py | {
"start": 451,
"end": 2546
} | class ____(RegexBasedColumnMapExpectation):
"""Expect column values to contain Thai Language.
Args:
column (str): \
A integer column that consist of Thai language.
"""
# These values will be used to configure the metric created by your expectation
regex_camel_name = "RegexName"
regex = "[\u0e00-\u0e7f]+"
semantic_type_name_plural = None
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"english": ["hello", "world"],
"thai": ["สวัสดี", "ชาวโลก"],
},
"only_for": ["pandas", "spark"],
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"in": {"column": "thai"},
"out": {
"success": True,
},
"include_in_gallery": True,
},
{
"title": "negative_test",
"exact_match_out": False,
"in": {"column": "english"},
"out": {
"success": False,
},
"include_in_gallery": True,
},
],
}
]
# Here your regex is used to create a custom metric for this expectation
map_metric = RegexBasedColumnMapExpectation.register_metric(
regex_camel_name=regex_camel_name,
regex_=regex,
)
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": ["regex", "thai"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@swittchawa", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnValuesToMatchThai().print_diagnostic_checklist()
| ExpectColumnValuesToMatchThai |
python | scipy__scipy | scipy/stats/_discrete_distns.py | {
"start": 22270,
"end": 27863
} | class ____(rv_discrete):
r"""A negative hypergeometric discrete random variable.
Consider a box containing :math:`M` balls:, :math:`n` red and
:math:`M-n` blue. We randomly sample balls from the box, one
at a time and *without* replacement, until we have picked :math:`r`
blue balls. `nhypergeom` is the distribution of the number of
red balls :math:`k` we have picked.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
{{M \choose n}}
for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
and the binomial coefficient is:
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
It is equivalent to observing :math:`k` successes in :math:`k+r-1`
samples with :math:`k+r`'th sample being a failure. The former
can be modelled as a hypergeometric distribution. The probability
of the latter is simply the number of failures remaining
:math:`M-n-(r-1)` divided by the size of the remaining population
:math:`M-(k+r-1)`. This relationship can be shown as:
.. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}
where :math:`NHG` is probability mass function (PMF) of the
negative hypergeometric distribution and :math:`HG` is the
PMF of the hypergeometric distribution.
%(after_notes)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import nhypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs.
Then if we want to know the probability of finding a given number
of dogs (successes) in a sample with exactly 12 animals that
aren't dogs (failures), we can initialize a frozen distribution
and plot the probability mass function:
>>> M, n, r = [20, 7, 12]
>>> rv = nhypergeom(M, n, r)
>>> x = np.arange(0, n+2)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group with given 12 failures')
>>> ax.set_ylabel('nhypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `nhypergeom`
methods directly. To for example obtain the probability mass
function, use:
>>> prb = nhypergeom.pmf(x, M, n, r)
And to generate random numbers:
>>> R = nhypergeom.rvs(M, n, r, size=10)
To verify the relationship between `hypergeom` and `nhypergeom`, use:
>>> from scipy.stats import hypergeom, nhypergeom
>>> M, n, r = 45, 13, 8
>>> k = 6
>>> nhypergeom.pmf(k, M, n, r)
0.06180776620271643
>>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
0.06180776620271644
See Also
--------
hypergeom, binom, nbinom
References
----------
.. [1] Negative Hypergeometric Distribution on Wikipedia
https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution
.. [2] Negative Hypergeometric Distribution from
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf
"""
def _shape_info(self):
return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("r", True, (0, np.inf), (True, False))]
def _get_support(self, M, n, r):
return 0, n
def _argcheck(self, M, n, r):
cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
cond &= _isintegral(M) & _isintegral(n) & _isintegral(r)
return cond
def _rvs(self, M, n, r, size=None, random_state=None):
@_vectorize_rvs_over_shapes
def _rvs1(M, n, r, size, random_state):
# invert cdf by calculating all values in support, scalar M, n, r
a, b = self.support(M, n, r)
ks = np.arange(a, b+1)
cdf = self.cdf(ks, M, n, r)
ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate')
rvs = ppf(random_state.uniform(size=size)).astype(int)
if size is None:
return rvs.item()
return rvs
return _rvs1(M, n, r, size=size, random_state=random_state)
def _logpmf(self, k, M, n, r):
return xpx.apply_where(
(r != 0) | (k != 0), (k, M, n, r),
lambda k, M, n, r:
(-betaln(k+1, r) + betaln(k+r, 1)
- betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1)
+ betaln(n+1, M-n+1) - betaln(M+1, 1)),
fill_value=0.0)
def _pmf(self, k, M, n, r):
# same as the following but numerically more precise
# return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
return exp(self._logpmf(k, M, n, r))
def _stats(self, M, n, r):
# Promote the datatype to at least float
# mu = rn / (M-n+1)
M, n, r = 1.*M, 1.*n, 1.*r
mu = r*n / (M-n+1)
var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))
# The skew and kurtosis are mathematically
# intractable so return `None`. See [2]_.
g1, g2 = None, None
return mu, var, g1, g2
nhypergeom = nhypergeom_gen(name='nhypergeom')
# FIXME: Fails _cdfvec
| nhypergeom_gen |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 13070,
"end": 13162
} | class ____:
def __call__(self, *operands):
return reduce_func(*operands)
| ReduceObj |
python | doocs__leetcode | lcof/面试题15. 二进制中1的个数/Solution2.py | {
"start": 0,
"end": 159
} | class ____:
def hammingWeight(self, n: int) -> int:
ans = 0
while n:
n -= n & (-n)
ans += 1
return ans
| Solution |
python | doocs__leetcode | solution/1100-1199/1134.Armstrong Number/Solution.py | {
"start": 0,
"end": 194
} | class ____:
def isArmstrong(self, n: int) -> bool:
k = len(str(n))
s, x = 0, n
while x:
s += (x % 10) ** k
x //= 10
return s == n
| Solution |
python | ray-project__ray | release/train_tests/benchmark/config.py | {
"start": 809,
"end": 883
} | class ____(TaskConfig):
TASK_NAME: ClassVar[str] = "recsys"
| RecsysConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol50.py | {
"start": 176,
"end": 240
} | class ____(Protocol[V_co]):
def f(self, /) -> V_co: ...
| Proto1 |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 95848,
"end": 96380
} | class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.darray = xr.tutorial.scatter_example_dataset()
def test_legend_labels(self) -> None:
fg = self.darray.A.plot.line(col="x", row="w", hue="z")
all_legend_labels = [t.get_text() for t in fg.figlegend.texts]
# labels in legend should be ['0', '1', '2', '3']
assert sorted(all_legend_labels) == ["0", "1", "2", "3"]
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
| TestFacetedLinePlotsLegend |
python | getsentry__sentry | src/sentry/identity/vsts/provider.py | {
"start": 5327,
"end": 6285
} | class ____(OAuth2CallbackView):
def get_access_token(self, pipeline: IdentityPipeline, code: str) -> Response:
data = {
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_assertion": self.client_secret,
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": code,
"redirect_uri": pipeline.config.get("redirect_url"),
}
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Content-Length": "1322",
}
return safe_urlopen(self.access_token_url, data=data, headers=headers)
# TODO(ecosystem): Make this the default provider
# We created this new flow in order to quickly update the DevOps integration to use
# the new Azure AD OAuth2 flow.
# This is a temporary solution until we can fully migrate to the new flow once customers are migrated
| VSTSOAuth2CallbackView |
python | huggingface__transformers | tests/models/altclip/test_processing_altclip.py | {
"start": 804,
"end": 942
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = AltCLIPProcessor
model_id = "BAAI/AltCLIP"
| AltClipProcessorTest |
python | uqfoundation__dill | dill/tests/test_session.py | {
"start": 3228,
"end": 3440
} | class ____(Calendar):
def weekdays(self):
return [day_name[i] for i in self.iterweekdays()]
cal = CalendarSubclass()
selfref = __main__
# Setup global namespace for session saving tests.
| CalendarSubclass |
python | google__jax | jax/_src/lax/control_flow/solves.py | {
"start": 6941,
"end": 20383
} | class ____(collections.namedtuple(
'_LinearSolveTuple', 'matvec, vecmat, solve, transpose_solve')):
def transpose(self):
return type(self)(self.vecmat, self.matvec, self.transpose_solve, self.solve)
def _split_linear_solve_args(args, const_lengths):
params_list = split_list(args, list(const_lengths))
return _LinearSolveTuple(*params_list[:-1]), params_list[-1]
def _transpose_one_output(linear_fun, primals):
transpose_fun = api.linear_transpose(linear_fun, primals)
def transposed_fun(x):
(y,) = transpose_fun(x)
return y
return transposed_fun
def _flatten(args):
return [x for arg in args for x in arg]
def _check_shapes(func_name, expected_name, actual, expected):
actual_shapes = _map(np.shape, tree_leaves(actual))
expected_shapes = _map(np.shape, tree_leaves(expected))
if actual_shapes != expected_shapes:
raise ValueError(
f"{func_name}() output shapes must match {expected_name}, "
f"got {actual_shapes} and {expected_shapes}")
@partial(api_boundary, repro_api_name="jax.custom_linear_solve")
def custom_linear_solve(
matvec: Callable,
b: Any,
solve: Callable[[Callable, Any], Any],
transpose_solve: Callable[[Callable, Any], Any] | None = None,
symmetric=False, has_aux=False):
"""Perform a matrix-free linear solve with implicitly defined gradients.
This function allows for overriding or defining gradients for a linear
solve directly via implicit differentiation at the solution, rather than by
differentiating *through* the solve operation. This can sometimes be much faster
or more numerically stable, or differentiating through the solve operation
may not even be implemented (e.g., if ``solve`` uses ``lax.while_loop``).
Required invariant::
x = solve(matvec, b) # solve the linear equation
assert matvec(x) == b # not checked
Args:
matvec: linear function to invert. Must be differentiable.
b: constant right handle side of the equation. May be any nested structure
of arrays.
solve: higher level function that solves for solution to the linear
equation, i.e., ``solve(matvec, x) == x`` for all ``x`` of the same form
as ``b``. This function need not be differentiable.
transpose_solve: higher level function for solving the transpose linear
equation, i.e., ``transpose_solve(vecmat, x) == x``, where ``vecmat`` is
the transpose of the linear map ``matvec`` (computed automatically with
autodiff). Required for backwards mode automatic differentiation, unless
``symmetric=True``, in which case ``solve`` provides the default value.
symmetric: bool indicating if it is safe to assume the linear map
corresponds to a symmetric matrix, i.e., ``matvec == vecmat``.
has_aux: bool indicating whether the ``solve`` and ``transpose_solve`` functions
return auxiliary data like solver diagnostics as a second argument.
Returns:
Result of ``solve(matvec, b)``, with gradients defined assuming that the
solution ``x`` satisfies the linear equation ``matvec(x) == b``.
"""
if transpose_solve is None and symmetric:
transpose_solve = solve
b_flat, in_args_tree = tree_flatten((b,))
b_avals = tuple(_map(core.get_aval, b_flat))
tree, = treedef_children(in_args_tree)
def _shape_checked(fun, name, has_aux):
def f(x):
y = fun(x)
_check_shapes(name, "b", y, b_flat)
return y
def f_aux(x):
y, aux = fun(x)
_check_shapes(name, "b", y, b_flat)
return y, aux
return f_aux if has_aux else f
matvec_debug = api_util.debug_info("custom_linear_solve",
matvec, (b,), {})
# no auxiliary data assumed for matvec
matvec_jaxpr, matvec_consts, out_tree = _initial_style_jaxpr(
_shape_checked(matvec, "matvec", False), in_args_tree, b_avals,
matvec_debug)
_check_tree("matvec", "b", out_tree, tree, False)
solve_debug = api_util.debug_info("custom_linear_solve solve",
solve, (matvec, b), {},
static_argnums=(0,))
solve_jaxpr, solve_consts, out_tree = _initial_style_jaxpr(
_shape_checked(partial(solve, matvec), "solve", has_aux), in_args_tree, b_avals,
solve_debug)
_check_tree("solve", "b", out_tree, tree, has_aux)
if transpose_solve is None:
vecmat_jaxpr = tr_solve_jaxpr = None
vecmat_consts = tr_solve_consts = []
else:
transpose_solve_debug = api_util.debug_info(
"custom_linear_solve transpose_solve", transpose_solve,
(matvec, b), {}, static_argnums=(0,))
if symmetric:
vecmat = matvec
vecmat_jaxpr = matvec_jaxpr
vecmat_consts = matvec_consts
else:
vecmat = _transpose_one_output(matvec, b)
vecmat_jaxpr, vecmat_consts, out_tree = _initial_style_jaxpr(
vecmat, in_args_tree, b_avals, transpose_solve_debug)
assert out_tree == tree
tr_solve_jaxpr, tr_solve_consts, out_tree = _initial_style_jaxpr(
_shape_checked(partial(transpose_solve, vecmat), "transpose_solve", has_aux),
in_args_tree, b_avals, transpose_solve_debug)
_check_tree("transpose_solve", "b", out_tree, tree, has_aux)
all_consts = [matvec_consts, vecmat_consts, solve_consts, tr_solve_consts]
const_lengths = _LinearSolveTuple(*_map(len, all_consts))
jaxprs = _LinearSolveTuple(
matvec_jaxpr, vecmat_jaxpr, solve_jaxpr, tr_solve_jaxpr)
args = _flatten(all_consts) + b_flat
args = core.standard_insert_pvary(*args)
out_flat = linear_solve_p.bind(*args, const_lengths=const_lengths, jaxprs=jaxprs)
return tree_unflatten(out_tree, out_flat)
def _linear_solve_abstract_eval(*args, const_lengths, jaxprs):
args_to_raise = args[sum(const_lengths):]
# raise aux_args to shaped arrays as well if present
# number of aux args is the difference in out_avals
# of solve and matvec (since they map to the same vector space)
num_aux = len(jaxprs.solve.out_avals) - len(jaxprs.matvec.out_avals)
if num_aux > 0:
args_to_raise += tuple(jaxprs.solve.out_avals[-num_aux:])
out_vma = core.standard_vma_rule('linear_solve', *args_to_raise)
return (tuple(a.update(vma=out_vma) for a in args_to_raise),
jaxprs.solve.effects)
def _custom_linear_solve_impl(*args, const_lengths, jaxprs):
params, b = _split_linear_solve_args(args, const_lengths)
x = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + b))
return x
def _tangent_linear_map(func: Callable, params, params_dot,
debug_info: core.DebugInfo,
*x):
"""Compute the tangent of a linear map.
Assuming ``func(*params, *x)`` is linear in ``x`` and computes ``A @ x``,
this function computes ``∂A @ x``.
"""
assert any(type(p) is not ad_util.Zero for p in params_dot)
zeros = _map(ad_util.Zero.from_primal_value, x)
_, out_tangent = ad.jvp(lu.wrap_init(func, debug_info=debug_info)).call_wrapped(
params + list(x), params_dot + zeros)
return out_tangent
def _custom_linear_solve_jvp(primals, tangents, const_lengths, jaxprs):
# A x - b = 0
# ∂A x + A ∂x - ∂b = 0
# ∂x = A^{-1} (∂b - ∂A x)
kwargs = dict(const_lengths=const_lengths, jaxprs=jaxprs)
x = linear_solve_p.bind(*primals, **kwargs)
params, _ = _split_linear_solve_args(primals, const_lengths)
params_dot, b_dot = _split_linear_solve_args(tangents, const_lengths)
num_x_leaves = len(b_dot)
# x is a flat tree with possible aux values appended
# since x_tree == b_tree == b_dot_tree, we can cut off
# aux values with len info provided by b_dot tree here
x_leaves, _ = split_list(x, [num_x_leaves])
if all(type(p) is ad_util.Zero for p in params_dot.matvec):
# no need to evaluate matvec_tangents
rhs = b_dot
else:
matvec_tangents = _tangent_linear_map(
core.jaxpr_as_fun(jaxprs.matvec), params.matvec, params_dot.matvec,
jaxprs.matvec.jaxpr.debug_info, *x_leaves)
rhs = _map(ad.add_tangents, b_dot, _map(operator.neg, matvec_tangents))
x_dot = linear_solve_p.bind(*(_flatten(params) + rhs), **kwargs)
# split into x tangents and aux tangents (these become zero)
dx_leaves, daux_leaves = split_list(x_dot, [num_x_leaves])
daux_leaves = _map(ad_util.Zero.from_primal_value, daux_leaves)
x_dot = dx_leaves + daux_leaves
return x, x_dot
def _linear_solve_transpose_rule(cotangent, *primals, const_lengths, jaxprs):
if jaxprs.transpose_solve is None:
raise TypeError('transpose_solve required for backwards mode automatic '
'differentiation of custom_linear_solve')
params, b = _split_linear_solve_args(primals, const_lengths)
if any(ad.is_undefined_primal(x) for xs in params for x in xs):
raise NotImplementedError("open an issue at https://github.com/google/jax !!")
assert all(ad.is_undefined_primal(x) for x in b) # TODO(mattjj): why?
x_cotangent, other_cotangents = split_list(cotangent, [len(b)])
if any(type(ct) is not ad_util.Zero for ct in other_cotangents):
raise NotImplementedError("open an issue at https://github.com/google/jax !!")
del other_cotangents
x_cotangent_ = _map(ad_util.instantiate, x_cotangent)
cotangent_b_full = linear_solve_p.bind(
*_flatten(params.transpose()), *x_cotangent_,
const_lengths=const_lengths.transpose(), jaxprs=jaxprs.transpose())
cotangent_b, _ = split_list(cotangent_b_full, [len(b)])
return [None] * sum(const_lengths) + cotangent_b
def _linear_solve_batching_rule(axis_data, args, dims, const_lengths, jaxprs):
orig_bat = [d is not batching.not_mapped for d in dims]
params, b = _split_linear_solve_args(args, const_lengths)
params_dims, b_dims = _split_linear_solve_args(dims, const_lengths)
params_bat, orig_b_bat = _split_linear_solve_args(orig_bat, const_lengths)
(matvec, vecmat, solve, solve_t) = jaxprs
(matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat
# number of operator out avals is assumed to be the same for matvec/vecmat
num_operator_out_avals = len(matvec.out_avals)
num_aux = len(solve.out_avals) - num_operator_out_avals
# Fixpoint computation of which parts of x and b are batched; we need to
# ensure this is consistent between all four jaxprs
b_bat = orig_b_bat
x_bat = [False] * len(solve.out_avals)
for i in range(1 + len(orig_b_bat) + len(solve.out_avals)):
# Apply vecmat and solve -> new batched parts of x
solve_jaxpr_batched, solve_x_bat = batching.batch_jaxpr(
solve, axis_data, solve_bat + b_bat, instantiate=x_bat)
if vecmat is None:
vecmat_jaxpr_batched = None
x_bat_out = solve_x_bat
else:
vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(
vecmat, axis_data, vecmat_bat + b_bat, instantiate=b_bat)
# batch all aux data by default
x_bat_out = _map(operator.or_, vecmat_x_bat + [True] * num_aux, solve_x_bat)
# keep a slice of only the linear operator part of solve's avals
x_bat_noaux = x_bat_out[:num_operator_out_avals]
# Apply matvec and solve_t -> new batched parts of b
matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(
matvec, axis_data, matvec_bat + x_bat_noaux, instantiate=b_bat)
if solve_t is None:
solve_t_jaxpr_batched = None
b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)
else:
solve_t_jaxpr_batched, solve_t_b_aux_bat = batching.batch_jaxpr(
solve_t, axis_data, solve_t_bat + x_bat_noaux, instantiate=x_bat_out)
assert len(solve_t_b_aux_bat) == len(orig_b_bat) + num_aux
solve_t_b_bat, _ = split_list(solve_t_b_aux_bat, [len(orig_b_bat)])
b_bat_out = _map(lambda m, s, o: m or s or o, matvec_b_bat, solve_t_b_bat,
orig_b_bat)
if x_bat_out == x_bat and b_bat_out == b_bat:
break
else:
x_bat = x_bat_out
b_bat = b_bat_out
else:
assert False, "Fixedpoint not reached"
batched_jaxprs = _LinearSolveTuple(matvec_jaxpr_batched, vecmat_jaxpr_batched,
solve_jaxpr_batched, solve_t_jaxpr_batched)
# Move batched axes to the front
new_params = [
batching.moveaxis(x, d, 0)
if d is not batching.not_mapped and d != 0 else x
for x, d in zip(_flatten(params), _flatten(params_dims))
]
# Broadcast out b if necessary
new_b = [
batching.broadcast(x, axis_data.size, 0, axis_data.explicit_mesh_axis)
if now_bat and not was_bat else
batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
for x, d, was_bat, now_bat in zip(b, b_dims, orig_b_bat, b_bat)
]
outs = linear_solve_p.bind(
*(new_params + new_b),
const_lengths=const_lengths,
jaxprs=batched_jaxprs)
out_dims = [0 if batched else batching.not_mapped for batched in solve_x_bat]
return outs, out_dims
linear_solve_p = core.Primitive('custom_linear_solve')
linear_solve_p.multiple_results = True
linear_solve_p.def_impl(_custom_linear_solve_impl)
linear_solve_p.def_effectful_abstract_eval(_linear_solve_abstract_eval)
ad.primitive_jvps[linear_solve_p] = _custom_linear_solve_jvp
pxla.register_initial_style_primitive(linear_solve_p)
mlir.register_lowering(
linear_solve_p, mlir.lower_fun(_custom_linear_solve_impl,
multiple_results=True))
ad.primitive_transposes[linear_solve_p] = _linear_solve_transpose_rule
batching.fancy_primitive_batchers[linear_solve_p] = _linear_solve_batching_rule
| _LinearSolveTuple |
python | doocs__leetcode | solution/2200-2299/2219.Maximum Sum Score of Array/Solution.py | {
"start": 0,
"end": 231
} | class ____:
def maximumSumScore(self, nums: List[int]) -> int:
l, r = 0, sum(nums)
ans = -inf
for x in nums:
l += x
ans = max(ans, l, r)
r -= x
return ans
| Solution |
python | google__jax | docs/autodidax.py | {
"start": 2916,
"end": 5458
} | class ____(NamedTuple):
name: str
add_p = Primitive('add')
mul_p = Primitive('mul')
neg_p = Primitive("neg")
sin_p = Primitive("sin")
cos_p = Primitive("cos")
reduce_sum_p = Primitive("reduce_sum")
greater_p = Primitive("greater")
less_p = Primitive("less")
transpose_p = Primitive("transpose")
broadcast_p = Primitive("broadcast")
def add(x, y): return bind1(add_p, x, y)
def mul(x, y): return bind1(mul_p, x, y)
def neg(x): return bind1(neg_p, x)
def sin(x): return bind1(sin_p, x)
def cos(x): return bind1(cos_p, x)
def greater(x, y): return bind1(greater_p, x, y)
def less(x, y): return bind1(less_p, x, y)
def transpose(x, perm): return bind1(transpose_p, x, perm=perm)
def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes)
def reduce_sum(x, axis=None):
if axis is None:
axis = tuple(range(np.ndim(x)))
if type(axis) is int:
axis = (axis,)
return bind1(reduce_sum_p, x, axis=axis)
def bind1(prim, *args, **params):
out, = bind(prim, *args, **params)
return out
# -
# We'll set up array data types and infix operator methods in a moment.
#
# A `Primitive` is just an object with a name, to which we attach our
# interpretation rules (one for each transformation). The `bind` function is our
# interception point: it'll figure out which transformation rule to apply, based
# on how the arguments are boxed in tracers and what interpreters are active.
#
# The functions that user code calls, like `add` and `sin`, are just wrappers
# around calls to `bind`. These wrappers let us control how arguments are passed
# to `bind`, and in particular we follow a handy internal convention: when we
# call `bind`, we pass values representing array data as positional arguments,
# and we pass metadata like the `axis` argument to `reduce_sum_p` via keyword. This
# calling convention simplifies some core logic (since e.g. instances of the
# `Tracer` class to be defined below can only occur in positional arguments to
# `bind`). The wrappers can also provide docstrings!
#
# We represent active interpreters as a stack. The stack is just a simple
# `list`, and each element is a container with an integer level (corresponding
# to the element's height in the stack), an interpreter type (which we'll call a
# `trace_type`), and an optional field for any global data the interpreter
# needs. We call each element a `MainTrace`, though maybe "Interpreter" would be
# more descriptive.
# +
from collections.abc import Sequence
from contextlib import contextmanager
from typing import Any
| Primitive |
python | ray-project__ray | rllib/utils/minibatch_utils.py | {
"start": 7960,
"end": 8208
} | class ____(MiniBatchIteratorBase):
def __init__(self, batch: MultiAgentBatch, **kwargs):
super().__init__(batch, **kwargs)
self._batch = batch
def __iter__(self):
yield self._batch
@DeveloperAPI
| MiniBatchDummyIterator |
python | kamyu104__LeetCode-Solutions | Python/is-graph-bipartite.py | {
"start": 39,
"end": 719
} | class ____(object):
def isBipartite(self, graph):
"""
:type graph: List[List[int]]
:rtype: bool
"""
color = {}
for node in xrange(len(graph)):
if node in color:
continue
stack = [node]
color[node] = 0
while stack:
curr = stack.pop()
for neighbor in graph[curr]:
if neighbor not in color:
stack.append(neighbor)
color[neighbor] = color[curr] ^ 1
elif color[neighbor] == color[curr]:
return False
return True
| Solution |
python | kamyu104__LeetCode-Solutions | Python/last-stone-weight.py | {
"start": 48,
"end": 445
} | class ____(object):
def lastStoneWeight(self, stones):
"""
:type stones: List[int]
:rtype: int
"""
max_heap = [-x for x in stones]
heapq.heapify(max_heap)
for i in xrange(len(stones)-1):
x, y = -heapq.heappop(max_heap), -heapq.heappop(max_heap)
heapq.heappush(max_heap, -abs(x-y))
return -max_heap[0]
| Solution |
python | google__jax | jax/experimental/mosaic/gpu/fragmented_array.py | {
"start": 24811,
"end": 34804
} | class ____:
"""Convert the array to 1D and then shard across threads."""
shape: tuple[int, ...]
vec_size: int
def __post_init__(self):
if np.prod(self.shape) % (self.vec_size * WARPGROUP_SIZE) != 0:
raise ValueError((self, WARPGROUP_SIZE))
@classmethod
def from_shaped_type(cls, shaped_ty: ir.Type) -> WGStridedFragLayout | None:
"""Returns a WGStridedFragLayout for the given shaped type.
Return None if the shaped type cannot have a strided layout.
"""
if not ir.ShapedType.isinstance(shaped_ty):
raise TypeError(shaped_ty)
shaped_ty = ir.ShapedType(shaped_ty)
if (bitwidth := mgpu.bitwidth(shaped_ty.element_type)) % 8:
return None
bw = bitwidth // 8
assert 8 % bw == 0 and 8 // bw != 0, bw
if math.prod(shaped_ty.shape) % WARPGROUP_SIZE != 0:
return None
max_vec_size = np.prod(shaped_ty.shape) // WARPGROUP_SIZE
return cls(
shape=tuple(shaped_ty.shape), vec_size=min(8 // bw, max_vec_size)
)
def registers_element_type(self, t: ir.Type) -> ir.Type:
return ir.VectorType.get((self.vec_size,), t)
def registers_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
"""Returns the shape of the register array needed to represent an array of the given logical shape."""
if shape != self.shape:
raise ValueError(f"Shape {shape} is not compatible with {self}")
return (math.prod(self.shape) // (WARPGROUP_SIZE * self.vec_size),)
def shape_from_registers_shape(
self, shape: tuple[int, ...]
) -> tuple[int, ...]:
del shape # Unused.
return self.shape
def thread_idxs(self, shape):
assert shape == self.shape
index = ir.IndexType.get()
for v in self.linear_thread_idxs():
res = []
for dim in reversed(self.shape):
dim = c(dim, index)
res.append(arith.remui(v, dim))
v = arith.divui(v, dim)
res.reverse()
yield res
def linear_thread_idxs(self):
"""The indexes to be used for vector load/store WGStridedFragLayout.
Yields:
The indices of the vector that correspond to the current thread.
"""
index = ir.IndexType.get()
cardinality = np.prod(self.shape)
assert cardinality % (WARPGROUP_SIZE * self.vec_size) == 0
reg_num = cardinality // (WARPGROUP_SIZE * self.vec_size)
tidx = arith.remui(gpu.thread_id(gpu.Dimension.x), c(WARPGROUP_SIZE, index))
off = arith.muli(tidx, c(self.vec_size, tidx.type))
for i in range(reg_num):
yield arith.addi(off, c(i * WARPGROUP_SIZE * self.vec_size, tidx.type))
FragmentedLayout = WGSplatFragLayout | WGStridedFragLayout | TiledLayout
WGMMA_COL_LAYOUT = TiledLayout(
Tiling(((8,), (2,))),
warp_dims=(Replicated(4),),
lane_dims=(Replicated(8), -2),
vector_dim=-1,
)
WGMMA_ROW_LAYOUT = TiledLayout(
Tiling(((64,), (16,), (8,), (1,))),
warp_dims=(-4,),
lane_dims=(-2, Replicated(4)),
vector_dim=-1,
)
# The tiled layout is equivalent to one described here in PTX documentation:
# https://docs.nvidia.com/cuda/parallel-thread-execution/#wgmma-64n16-d
# In this layout, we partition the 64x8 tiles over 4 warps into 16x8 tiles.
# Then, we further split the 16x8 tiles into 8x8 submatrices which are the unit
# of data that is split across a warp. Since 8*8 = 64, but a warp has only 32
# threads, we vectorize pairs of elements along columns.
# The assignment of elements to warp lanes is as follows:
#
# 0 0 1 1 2 2 3 3
# 4 4 5 5 6 6 7 7
# 8 8 9 9 10 10 11 11
# 12 12 13 13 14 14 15 15
# ...
WGMMA_LAYOUT = TiledLayout(
Tiling(((64, 8), (16, 8), (8, 8), (2,))),
warp_dims=(-7,),
lane_dims=(-3, -2),
vector_dim=-1,
)
# This is the same as WGMMA_LAYOUT, only with a vector length of 1. LLVM now
# treats <2 x float> as a native PTX type and uses 64-bit registers to store
# them. This, in turn, means that we have to explode them into 32-bit registers
# right before WGMMA, which makes ptxas very unhappy and causes it to insert
# lots of WGMMA waits that absolutely tank the performance. As a workaround,
# we use this layout when 32-bit data with WGMMA_LAYOUT is used to initialize
# a WGMMAAccumulator, to ensure that the LLVM accumulator registers will always
# be represented as 32-bit PTX registers.
WGMMA_LAYOUT_ACC_32BIT = TiledLayout(
Tiling(((64, 8), (16, 8), (8, 8), (2,), (1,))),
warp_dims=(-8,),
lane_dims=(-4, -3),
vector_dim=-1,
)
# The tiled layout is equivalent to one described here in PTX documentation:
# https://docs.nvidia.com/cuda/parallel-thread-execution/#wgmma-64n32-a
# In this layout, we partition the 64x16 tiles over 4 warps into 16x16 tiles.
# Then, we further split the 16x16 tiles into 8x16 submatrices which are the unit
# of data that is split across a warp. Since 8*16 = 128, but a warp has only 32
# threads, we vectorize quadruplets of elements along columns.
# The assignment of elements to warp lanes is as follows:
#
# 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3
# 4 4 4 4 5 5 5 5 6 6 6 6 7 7 7 7
# 8 8 8 8 9 9 9 9 10 10 10 10 11 11 11 11
# 12 12 12 12 13 13 13 13 14 14 14 14 15 15 15 15
# ...
WGMMA_LAYOUT_8BIT = TiledLayout(
Tiling(((64, 16), (16, 16), (8, 16), (4,))),
warp_dims=(-7,),
lane_dims=(-3, -2),
vector_dim=-1,
)
# This tiled layout is similar to the WGMMA layout, only the unit at which we
# assign submatrices to warps grows from 8x8 to 8x16. The elements within each
# submatrix are assigned to threads in the following way:
#
# 0 0 0 0 2 2 2 2 1 1 1 1 3 3 3 3
# 4 4 4 4 6 6 6 6 5 5 5 5 7 7 7 7
# ...
#
# Our vector length is twice the size of that of WGMMA_LAYOUT, which lets us use
# 32-bit SMEM loads/stores when dealing with 8-bit values. The conversion
# to the WGMMA layout only requires communication between with index differing
# in their 2 bit (i.e. 0 and 1, 2 and 4), so the conversion to WGMMA_LAYOUT
# only requires a single warp shuffle (plus permutes local to each thread).
WGMMA_LAYOUT_UPCAST_2X = TiledLayout(
Tiling(((64, 16), (16, 16), (8, 16), (8,), (4,))),
warp_dims=(-8,),
lane_dims=(-4, -2, -3),
vector_dim=-1,
)
# This layout should be used when upcasting 4-bit elements to 16-bit, for the
# purpose of passing them into WGMMA later. The core matrices stored by a warp
# are 8x32, because each of the 4 threads in a row holds 8 elements in a single
# vector. Note that unlike WGMMA_LAYOUT_UPCAST_2X, we assign columns to each
# group of 4 threads in order (as opposed to the swapping between 1 and 2,
# 5 and 6, etc. that WGMMA_LAYOUT_UPCAST_2X does).
WGMMA_LAYOUT_UPCAST_4X = TiledLayout(
Tiling(((64, 32), (16, 32), (8, 32), (8,))),
warp_dims=(-7,),
lane_dims=(-3, -2),
vector_dim=-1,
)
# This tiled layout is similar to WGMMA_LAYOUT. There, each warp stores a 8x8
# submatrix in the following way (we only show the first 4 rows for brevity):
#
# 0 0 1 1 2 2 3 3
# 4 4 5 5 6 6 7 7
# 8 8 9 9 10 10 11 11
# 12 12 13 13 14 14 15 15
# ...
#
# This tiled layout stores the same 8x8 submatrix in the following way:
#
# 0 4 1 5 2 6 3 7
# 0 4 1 5 2 6 3 7
# 8 12 9 13 10 14 11 15
# 8 12 9 13 10 14 11 15
# ...
#
# You can see that we have taken 2x2 submatrices from the above layout and
# transposed them. The assignment of lanes to elements is such that in both
# layouts the same two lanes map to a single 2x2 submatrix, making the transpose
# very cheap (one shuffle and permute suffices to change between those layouts).
WGMMA_TRANSPOSED_LAYOUT = TiledLayout(
Tiling(((64, 8), (16, 8), (8, 8), (2, 2), (2, 1))),
warp_dims=(-10,),
lane_dims=(-6, -3, -5),
vector_dim=-2,
)
# Like WGMMA_LAYOUT, only each warp holds a 32xN strip instead of 16xN.
TCGEN05_LAYOUT = TiledLayout(
Tiling(((128, 8), (32, 8), (8, 8), (2,))),
warp_dims=(-7,),
lane_dims=(-3, -2),
vector_dim=-1,
)
# Like WGMMA_TRANSPOSED_LAYOUT, only each warp holds a 32xN strip instead of 16xN.
TCGEN05_TRANSPOSED_LAYOUT = TiledLayout(
Tiling(((128, 8), (32, 8), (8, 8), (2, 2), (2, 1))),
warp_dims=(-10,),
lane_dims=(-6, -3, -5),
vector_dim=-2,
)
# TCGEN05_ROW_LAYOUT is to TCGEN05_LAYOUT as WGMMA_ROW_LAYOUT is to
# WGMMA_LAYOUT.
TCGEN05_ROW_LAYOUT = TiledLayout(
Tiling(tiles=((128,), (32,), (8,), (1,))),
warp_dims=(-4,),
lane_dims=(-2, Replicated(times=4)),
vector_dim=-1,
)
# TCGEN05_COL_LAYOUT is to TCGEN05_LAYOUT as WGMMA_COL_LAYOUT is to
# WGMMA_LAYOUT.
TCGEN05_COL_LAYOUT = TiledLayout(
Tiling(tiles=((8,), (2,))),
warp_dims=(Replicated(times=4),),
lane_dims=(Replicated(times=8), -2),
vector_dim=-1,
)
def tmem_native_layout(vector_length: int):
"""A layout resembling the logical organization of TMEM.
The 128 rows in a tile are assigned to 128 lanes in the warpgroup. Useful when
the result needs to be processed in registers and then stored back into TMEM.
Usually shouldn't be used if the result is to be written back to SMEM, as
there is no good way to store it without bank conflicts, but it still
sometimes pays off.
"""
return TiledLayout(
Tiling(((128, vector_length), (32, vector_length))),
warp_dims=(-4,),
lane_dims=(-2,),
vector_dim=-1,
)
# We use a vector_dim of 2, to be able to make sure that the vectors are always
# a multiple of 32-bits, even when the data is 16-bits.
TMEM_NATIVE_LAYOUT = tmem_native_layout(2)
# A layout for the row indices used by TMA gather4/scatter4 instructions.
# Index 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 ...
# Warp <--- 0 ---> <--- 1 ---> <--- 2 ---> <--- 3 ---> <--- 0 --
TMA_GATHER_INDICES_LAYOUT = TiledLayout(
Tiling(((16,), (4,))),
warp_dims=(-2,),
lane_dims=(Replicated(32),),
vector_dim=-1,
)
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(init=False, frozen=True, slots=True)
| WGStridedFragLayout |
python | facebookresearch__faiss | tests/test_factory.py | {
"start": 7734,
"end": 8028
} | class ____(unittest.TestCase):
def test_clone(self):
index = faiss.index_factory(16, 'IVF10,PQ4np')
xb = faiss.rand((1000, 16))
index.train(xb)
index.add(xb)
index2 = faiss.clone_index(index)
assert index2.ntotal == index.ntotal
| TestCloneIVFPQ |
python | readthedocs__readthedocs.org | readthedocs/proxito/tests/test_old_redirects.py | {
"start": 44567,
"end": 55066
} | class ____(BaseDocServing):
def test_redirect_exact_redirect_with_wildcard_crossdomain(self):
"""
Avoid redirecting to an external site unless the external site is in to_url.
We also test by trying to bypass the protocol check with the special chars listed at
https://github.com/python/cpython/blob/c3ffbbdf3d5645ee07c22649f2028f9dffc762ba/Lib/urllib/parse.py#L80-L81.
"""
fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/*",
to_url="/en/latest/:splat",
)
urls = [
# Plain protocol, these are caught by the slash redirect.
(
"http://project.dev.readthedocs.io/http://my.host/path.html",
"/http:/my.host/path.html",
),
(
"http://project.dev.readthedocs.io//my.host/path.html",
"/my.host/path.html",
),
# Trying to bypass the protocol check by including a `\r` char.
(
"http://project.dev.readthedocs.io/http:/%0D/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
),
(
"http://project.dev.readthedocs.io/%0D/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/my.host/path.html",
),
# Trying to bypass the protocol check by including a `\t` char.
(
"http://project.dev.readthedocs.io/http:/%09/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
),
(
"http://project.dev.readthedocs.io/%09/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/my.host/path.html",
),
]
for url, expected_location in urls:
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 302, url)
self.assertEqual(r["Location"], expected_location, url)
def test_redirect_exact_with_wildcard_crossdomain_with_newline_chars(self):
fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/*",
to_url="/en/latest/:splat",
)
urls = [
(
"http://project.dev.readthedocs.io/http:/%0A/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
),
(
"http://project.dev.readthedocs.io/%0A/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/my.host/path.html",
),
]
for url, expected_location in urls:
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 302, url)
self.assertEqual(r["Location"], expected_location, url)
def test_redirect_exact_with_wildcard_crossdomain(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/en/latest/*",
to_url="/:splat",
)
urls = [
(
"http://project.dev.readthedocs.io/en/latest/%0D/example.com/path.html",
"http://project.dev.readthedocs.io//example.com/path.html",
),
# These are caught by the slash redirect.
(
"http://project.dev.readthedocs.io/en/latest//example.com",
"/en/latest/example.com",
),
(
"http://project.dev.readthedocs.io/en/latest/https://example.com",
"/en/latest/https:/example.com",
),
]
for url, expected_location in urls:
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 302, url)
self.assertEqual(r["Location"], expected_location, url)
def test_redirect_html_to_clean_url_crossdomain(self):
"""
Avoid redirecting to an external site unless the external site is in to_url
"""
fixture.get(
Redirect,
project=self.project,
redirect_type=HTML_TO_CLEAN_URL_REDIRECT,
)
urls = [
# Plain protocol, these are caught by the slash redirect.
(
"http://project.dev.readthedocs.io/http://my.host/path.html",
"/http:/my.host/path.html",
),
(
"http://project.dev.readthedocs.io//my.host/path.html",
"/my.host/path.html",
),
# Trying to bypass the protocol check by including a `\r` char.
(
"http://project.dev.readthedocs.io/http:/%0D/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/http://my.host/path/",
),
(
"http://project.dev.readthedocs.io/%0D/my.host/path.html",
"http://project.dev.readthedocs.io/en/latest/my.host/path/",
),
]
for url, expected_location in urls:
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 302, url)
self.assertEqual(r["Location"], expected_location, url)
def test_redirect_clean_url_to_html_crossdomain(self):
"""Avoid redirecting to an external site unless the external site is in to_url."""
fixture.get(
Redirect,
project=self.project,
redirect_type=CLEAN_URL_TO_HTML_REDIRECT,
)
urls = [
# Plain protocol, these are caught by the slash redirect.
(
"http://project.dev.readthedocs.io/http://my.host/path/",
"/http:/my.host/path/",
),
("http://project.dev.readthedocs.io//my.host/path/", "/my.host/path/"),
# Trying to bypass the protocol check by including a `\r` char.
(
"http://project.dev.readthedocs.io/http:/%0D/my.host/path/",
"http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
),
(
"http://project.dev.readthedocs.io/%0D/my.host/path/",
"http://project.dev.readthedocs.io/en/latest/my.host/path.html",
),
]
for url, expected_location in urls:
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 302, url)
self.assertEqual(r["Location"], expected_location, url)
def test_redirect_using_projects_prefix(self):
"""
Test that we can support redirects using the ``/projects/`` prefix.
https://github.com/readthedocs/readthedocs.org/issues/7552
"""
redirect = fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/projects/*",
to_url="https://example.com/projects/:splat",
)
self.assertEqual(self.project.redirects.count(), 1)
r = self.client.get(
"/projects/deleted-subproject/en/latest/guides/install.html",
headers={"host": "project.dev.readthedocs.io"},
)
self.assertEqual(r.status_code, 302)
self.assertEqual(
r["Location"],
"https://example.com/projects/deleted-subproject/en/latest/guides/install.html",
)
redirect.from_url = "/projects/not-found/*"
redirect.to_url = "/projects/subproject/:splat"
redirect.save()
r = self.client.get(
"/projects/not-found/en/latest/guides/install.html",
headers={"host": "project.dev.readthedocs.io"},
)
self.assertEqual(r.status_code, 302)
self.assertEqual(
r["Location"],
"http://project.dev.readthedocs.io/projects/subproject/en/latest/guides/install.html",
)
def test_page_redirect_crossdomain(self):
fixture.get(
Redirect,
project=self.project,
redirect_type=PAGE_REDIRECT,
from_url="/install.html",
to_url="https://example.com/",
)
r = self.client.get(
"/en/latest/install.html", headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 302)
self.assertEqual(r["Location"], "https://example.com/")
def test_page_redirect_with_wildcard_crossdomain(self):
fixture.get(
Redirect,
project=self.project,
redirect_type=PAGE_REDIRECT,
from_url="/tutorial/*",
to_url="https://example.com/:splat",
)
r = self.client.get(
"/en/latest/tutorial/install.html",
headers={"host": "project.dev.readthedocs.io"},
)
self.assertEqual(r.status_code, 302)
self.assertEqual(r["Location"], "https://example.com/install.html")
def test_exact_redirect_crossdomain(self):
fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/en/latest/install.html",
to_url="https://example.com/",
)
r = self.client.get(
"/en/latest/install.html", headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 302)
self.assertEqual(r["Location"], "https://example.com/")
def test_exact_redirect_with_wildcard_crossdomain(self):
fixture.get(
Redirect,
project=self.project,
redirect_type=EXACT_REDIRECT,
from_url="/en/latest/*",
to_url="https://example.com/:splat",
)
r = self.client.get(
"/en/latest/install.html", headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 302)
self.assertEqual(r["Location"], "https://example.com/install.html")
| UserRedirectCrossdomainTest |
python | apache__airflow | helm-tests/tests/helm_tests/security/test_rbac.py | {
"start": 4092,
"end": 21821
} | class ____:
"""Tests RBAC."""
def _get_values_with_version(self, values, version):
if version != "default":
values["airflowVersion"] = version
return values
def _is_airflow_3_or_above(self, version):
return version == "default" or (parse_version(version) >= parse_version("3.0.0"))
def _get_object_tuples(self, version, sa: bool = True, dedicated_workers_sa: None | bool = None):
tuples = copy(DEPLOYMENT_NO_RBAC_NO_SA_KIND_NAME_TUPLES)
if version in {"default", "3.0.0"}:
tuples.append(("Service", "test-rbac-triggerer"))
tuples.append(("StatefulSet", "test-rbac-triggerer"))
else:
tuples.append(("Deployment", "test-rbac-triggerer"))
if version == "2.3.2":
tuples.append(("Secret", "test-rbac-result-backend"))
if self._is_airflow_3_or_above(version):
tuples.extend(
(
("Service", "test-rbac-api-server"),
("Deployment", "test-rbac-api-server"),
("Deployment", "test-rbac-dag-processor"),
("Secret", "test-rbac-api-secret-key"),
("Secret", "test-rbac-jwt-secret"),
)
)
if sa:
tuples.append(("ServiceAccount", "test-rbac-api-server"))
tuples.append(("ServiceAccount", "test-rbac-dag-processor"))
else:
tuples.extend(
(
("Service", "test-rbac-webserver"),
("Deployment", "test-rbac-webserver"),
("Secret", "test-rbac-webserver-secret-key"),
)
)
if sa:
tuples.append(("ServiceAccount", "test-rbac-webserver"))
if dedicated_workers_sa is not None:
if dedicated_workers_sa:
tuples.append(("ServiceAccount", "test-rbac-worker-celery"))
tuples.append(("ServiceAccount", "test-rbac-worker-kubernetes"))
else:
tuples.append(("ServiceAccount", "test-rbac-worker"))
return tuples
@parametrize_version
@pytest.mark.parametrize(
"workers_values",
[
{"serviceAccount": {"create": False}},
{
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"create": False}},
"kubernetes": {"serviceAccount": {"create": False}},
},
],
)
def test_deployments_no_rbac_no_sa(self, version, workers_values):
k8s_objects = render_chart(
"test-rbac",
values=self._get_values_with_version(
values={
"fullnameOverride": "test-rbac",
"executor": "CeleryExecutor,KubernetesExecutor",
"rbac": {"create": False},
"cleanup": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"databaseCleanup": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"pgbouncer": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"redis": {"serviceAccount": {"create": False}},
"scheduler": {"serviceAccount": {"create": False}},
"dagProcessor": {"serviceAccount": {"create": False}},
"webserver": {"serviceAccount": {"create": False}},
"apiServer": {"serviceAccount": {"create": False}},
"workers": workers_values,
"triggerer": {"serviceAccount": {"create": False}},
"statsd": {"serviceAccount": {"create": False}},
"createUserJob": {"serviceAccount": {"create": False}},
"migrateDatabaseJob": {"serviceAccount": {"create": False}},
"flower": {"enabled": True, "serviceAccount": {"create": False}},
},
version=version,
),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
assert sorted(list_of_kind_names_tuples) == sorted(self._get_object_tuples(version, sa=False))
@parametrize_version
@pytest.mark.parametrize("dedicated_workers_sa", [False, True])
def test_deployments_no_rbac_with_sa(self, version, dedicated_workers_sa):
k8s_objects = render_chart(
"test-rbac",
values=self._get_values_with_version(
values={
"fullnameOverride": "test-rbac",
"executor": "CeleryExecutor,KubernetesExecutor",
"rbac": {"create": False},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
"workers": {"useWorkerDedicatedServiceAccounts": dedicated_workers_sa},
},
version=version,
),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
real_list_of_kind_names = (
self._get_object_tuples(version, dedicated_workers_sa=dedicated_workers_sa)
+ SERVICE_ACCOUNT_NAME_TUPLES
)
assert sorted(list_of_kind_names_tuples) == sorted(real_list_of_kind_names)
@parametrize_version
@pytest.mark.parametrize(
"workers_values",
[
{"serviceAccount": {"create": False}},
{
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"create": False}},
"kubernetes": {"serviceAccount": {"create": False}},
},
],
)
def test_deployments_with_rbac_no_sa(self, version, workers_values):
k8s_objects = render_chart(
"test-rbac",
values=self._get_values_with_version(
values={
"fullnameOverride": "test-rbac",
"executor": "CeleryExecutor,KubernetesExecutor",
"cleanup": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"databaseCleanup": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"scheduler": {"serviceAccount": {"create": False}},
"dagProcessor": {"serviceAccount": {"create": False}},
"webserver": {"serviceAccount": {"create": False}},
"apiServer": {"serviceAccount": {"create": False}},
"workers": workers_values,
"triggerer": {"serviceAccount": {"create": False}},
"flower": {"enabled": True, "serviceAccount": {"create": False}},
"statsd": {"serviceAccount": {"create": False}},
"redis": {"serviceAccount": {"create": False}},
"pgbouncer": {
"enabled": True,
"serviceAccount": {
"create": False,
},
},
"createUserJob": {"serviceAccount": {"create": False}},
"migrateDatabaseJob": {"serviceAccount": {"create": False}},
},
version=version,
),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
real_list_of_kind_names = self._get_object_tuples(version, sa=False) + RBAC_ENABLED_KIND_NAME_TUPLES
assert sorted(list_of_kind_names_tuples) == sorted(real_list_of_kind_names)
@parametrize_version
@pytest.mark.parametrize("dedicated_workers_sa", [False, True])
def test_deployments_with_rbac_with_sa(self, version, dedicated_workers_sa):
k8s_objects = render_chart(
"test-rbac",
values=self._get_values_with_version(
values={
"fullnameOverride": "test-rbac",
"executor": "CeleryExecutor,KubernetesExecutor",
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
"workers": {"useWorkerDedicatedServiceAccounts": dedicated_workers_sa},
},
version=version,
),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
real_list_of_kind_names = (
self._get_object_tuples(version, dedicated_workers_sa=dedicated_workers_sa)
+ SERVICE_ACCOUNT_NAME_TUPLES
+ RBAC_ENABLED_KIND_NAME_TUPLES
)
assert sorted(list_of_kind_names_tuples) == sorted(real_list_of_kind_names)
def test_service_account_custom_names(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-rbac",
"cleanup": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_CLEANUP_NAME,
},
},
"databaseCleanup": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_DATABASE_CLEANUP_NAME,
},
},
"scheduler": {"serviceAccount": {"name": CUSTOM_SCHEDULER_NAME}},
"dagProcessor": {"serviceAccount": {"name": CUSTOM_DAG_PROCESSOR_NAME}},
"apiServer": {"serviceAccount": {"name": CUSTOM_API_SERVER_NAME}},
"workers": {"serviceAccount": {"name": CUSTOM_WORKER_NAME}},
"triggerer": {"serviceAccount": {"name": CUSTOM_TRIGGERER_NAME}},
"flower": {"enabled": True, "serviceAccount": {"name": CUSTOM_FLOWER_NAME}},
"statsd": {"serviceAccount": {"name": CUSTOM_STATSD_NAME}},
"redis": {"serviceAccount": {"name": CUSTOM_REDIS_NAME}},
"postgresql": {"serviceAccount": {"create": True, "name": CUSTOM_POSTGRESQL_NAME}},
"pgbouncer": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_PGBOUNCER_NAME,
},
},
"createUserJob": {"serviceAccount": {"name": CUSTOM_CREATE_USER_JOBS_NAME}},
"migrateDatabaseJob": {"serviceAccount": {"name": CUSTOM_MIGRATE_DATABASE_JOBS_NAME}},
},
)
list_of_sa_names = [
k8s_object["metadata"]["name"]
for k8s_object in k8s_objects
if k8s_object["kind"] == "ServiceAccount"
]
assert sorted(list_of_sa_names) == sorted(CUSTOM_SERVICE_ACCOUNT_NAMES)
def test_workers_service_account_custom_name(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-rbac",
"executor": "CeleryExecutor,KubernetesExecutor",
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"name": CUSTOM_WORKER_CELERY_NAME}},
"kubernetes": {"serviceAccount": {"name": CUSTOM_WORKER_KUBERNETES_NAME}},
},
},
show_only=[
"templates/workers/worker-celery-serviceaccount.yaml",
"templates/workers/worker-kubernetes-serviceaccount.yaml",
],
)
list_of_sa_names = [
k8s_object["metadata"]["name"]
for k8s_object in k8s_objects
if k8s_object["kind"] == "ServiceAccount"
]
assert len(k8s_objects) == 2
assert sorted(list_of_sa_names) == [CUSTOM_WORKER_CELERY_NAME, CUSTOM_WORKER_KUBERNETES_NAME]
def test_webserver_service_account_name_airflow_2(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "2.10.5",
"fullnameOverride": "test-rbac",
"webserver": {"serviceAccount": {"name": CUSTOM_WEBSERVER_NAME}},
},
show_only=["templates/webserver/webserver-serviceaccount.yaml"],
)
sa_name = jmespath.search("metadata.name", k8s_objects[0])
assert sa_name == CUSTOM_WEBSERVER_NAME
def test_service_account_custom_names_in_objects(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-rbac",
"cleanup": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_CLEANUP_NAME,
},
},
"databaseCleanup": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_DATABASE_CLEANUP_NAME,
},
},
"scheduler": {"serviceAccount": {"name": CUSTOM_SCHEDULER_NAME}},
"dagProcessor": {"serviceAccount": {"name": CUSTOM_DAG_PROCESSOR_NAME}},
"apiServer": {"serviceAccount": {"name": CUSTOM_API_SERVER_NAME}},
"workers": {"serviceAccount": {"name": CUSTOM_WORKER_NAME}},
"triggerer": {"serviceAccount": {"name": CUSTOM_TRIGGERER_NAME}},
"flower": {"enabled": True, "serviceAccount": {"name": CUSTOM_FLOWER_NAME}},
"statsd": {"serviceAccount": {"name": CUSTOM_STATSD_NAME}},
"redis": {"serviceAccount": {"name": CUSTOM_REDIS_NAME}},
"postgresql": {"serviceAccount": {"name": CUSTOM_POSTGRESQL_NAME}},
"pgbouncer": {
"enabled": True,
"serviceAccount": {
"name": CUSTOM_PGBOUNCER_NAME,
},
},
"createUserJob": {"serviceAccount": {"name": CUSTOM_CREATE_USER_JOBS_NAME}},
"migrateDatabaseJob": {"serviceAccount": {"name": CUSTOM_MIGRATE_DATABASE_JOBS_NAME}},
},
)
list_of_sa_names_in_objects = []
for k8s_object in k8s_objects:
name = (
jmespath.search("spec.template.spec.serviceAccountName", k8s_object)
or jmespath.search(
"spec.jobTemplate.spec.template.spec.serviceAccountName",
k8s_object,
)
or None
)
if name and name not in list_of_sa_names_in_objects:
list_of_sa_names_in_objects.append(name)
assert sorted(list_of_sa_names_in_objects) == sorted(CUSTOM_SERVICE_ACCOUNT_NAMES)
def test_workers_celery_service_account_custom_names_in_objects(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-rbac",
"workers": {
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"name": CUSTOM_WORKER_CELERY_NAME}},
},
},
show_only=[
"templates/workers/worker-deployment.yaml",
],
)
assert (
jmespath.search("spec.template.spec.serviceAccountName", k8s_objects[0])
== CUSTOM_WORKER_CELERY_NAME
)
def test_service_account_without_resource(self):
k8s_objects = render_chart(
"test-rbac",
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-rbac",
"executor": "LocalExecutor",
"cleanup": {"enabled": False},
"pgbouncer": {"enabled": False},
"redis": {"enabled": False},
"flower": {"enabled": False},
"statsd": {"enabled": False},
"webserver": {"defaultUser": {"enabled": False}},
},
)
list_of_sa_names = [
k8s_object["metadata"]["name"]
for k8s_object in k8s_objects
if k8s_object["kind"] == "ServiceAccount"
]
service_account_names = [
"test-rbac-scheduler",
"test-rbac-dag-processor",
"test-rbac-api-server",
"test-rbac-triggerer",
"test-rbac-migrate-database-job",
]
assert sorted(list_of_sa_names) == sorted(service_account_names)
| TestRBAC |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed2.py | {
"start": 130,
"end": 306
} | class ____(TypedDict, extra_items=int):
name: str
def func1(movie: Movie1) -> None:
del movie["year"]
# This should generate an error.
del movie["name"]
| Movie1 |
python | MongoEngine__mongoengine | docs/code/tumblelog.py | {
"start": 152,
"end": 302
} | class ____(Document):
email = StringField(required=True)
first_name = StringField(max_length=50)
last_name = StringField(max_length=50)
| User |
python | django__django | django/views/generic/dates.py | {
"start": 20079,
"end": 20423
} | class ____(BaseDayArchiveView):
"""
Base view for a list of objects published today.
This requires subclassing to provide a response mixin.
"""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
return self._get_dated_items(datetime.date.today())
| BaseTodayArchiveView |
python | django__django | tests/redirects_tests/tests.py | {
"start": 628,
"end": 3315
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.site = Site.objects.get(pk=settings.SITE_ID)
def test_model(self):
r1 = Redirect.objects.create(
site=self.site, old_path="/initial", new_path="/new_target"
)
self.assertEqual(str(r1), "/initial ---> /new_target")
def test_redirect(self):
Redirect.objects.create(
site=self.site, old_path="/initial", new_path="/new_target"
)
response = self.client.get("/initial")
self.assertRedirects(
response, "/new_target", status_code=301, target_status_code=404
)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash(self):
Redirect.objects.create(
site=self.site, old_path="/initial/", new_path="/new_target/"
)
response = self.client.get("/initial")
self.assertRedirects(
response, "/new_target/", status_code=301, target_status_code=404
)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash_and_query_string(self):
Redirect.objects.create(
site=self.site, old_path="/initial/?foo", new_path="/new_target/"
)
response = self.client.get("/initial?foo")
self.assertRedirects(
response, "/new_target/", status_code=301, target_status_code=404
)
@override_settings(APPEND_SLASH=True)
def test_redirect_not_found_with_append_slash(self):
"""
Exercise the second Redirect.DoesNotExist branch in
RedirectFallbackMiddleware.
"""
response = self.client.get("/test")
self.assertEqual(response.status_code, 404)
def test_redirect_shortcircuits_non_404_response(self):
"""RedirectFallbackMiddleware short-circuits on non-404 requests."""
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
def test_response_gone(self):
"""When the redirect target is '', return a 410"""
Redirect.objects.create(site=self.site, old_path="/initial", new_path="")
response = self.client.get("/initial")
self.assertEqual(response.status_code, 410)
@modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"})
def test_sites_not_installed(self):
def get_response(request):
return HttpResponse()
msg = (
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
RedirectFallbackMiddleware(get_response)
| RedirectTests |
python | aio-libs__aiohttp | aiohttp/http_writer.py | {
"start": 1343,
"end": 12691
} | class ____(AbstractStreamWriter):
length: int | None = None
chunked: bool = False
_eof: bool = False
_compress: ZLibCompressor | None = None
def __init__(
self,
protocol: BaseProtocol,
loop: asyncio.AbstractEventLoop,
on_chunk_sent: _T_OnChunkSent = None,
on_headers_sent: _T_OnHeadersSent = None,
) -> None:
self._protocol = protocol
self.loop = loop
self._on_chunk_sent: _T_OnChunkSent = on_chunk_sent
self._on_headers_sent: _T_OnHeadersSent = on_headers_sent
self._headers_buf: bytes | None = None
self._headers_written: bool = False
@property
def transport(self) -> asyncio.Transport | None:
return self._protocol.transport
@property
def protocol(self) -> BaseProtocol:
return self._protocol
def enable_chunking(self) -> None:
self.chunked = True
def enable_compression(
self, encoding: str = "deflate", strategy: int | None = None
) -> None:
self._compress = ZLibCompressor(encoding=encoding, strategy=strategy)
def _write(
self, chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"]
) -> None:
size = len(chunk)
self.buffer_size += size
self.output_size += size
transport = self._protocol.transport
if transport is None or transport.is_closing():
raise ClientConnectionResetError("Cannot write to closing transport")
transport.write(chunk)
def _writelines(
self,
chunks: Iterable[
Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"]
],
) -> None:
size = 0
for chunk in chunks:
size += len(chunk)
self.buffer_size += size
self.output_size += size
transport = self._protocol.transport
if transport is None or transport.is_closing():
raise ClientConnectionResetError("Cannot write to closing transport")
if SKIP_WRITELINES or size < MIN_PAYLOAD_FOR_WRITELINES:
transport.write(b"".join(chunks))
else:
transport.writelines(chunks)
def _write_chunked_payload(
self, chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"]
) -> None:
"""Write a chunk with proper chunked encoding."""
chunk_len_pre = f"{len(chunk):x}\r\n".encode("ascii")
self._writelines((chunk_len_pre, chunk, b"\r\n"))
def _send_headers_with_payload(
self,
chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"],
is_eof: bool,
) -> None:
"""Send buffered headers with payload, coalescing into single write."""
# Mark headers as written
self._headers_written = True
headers_buf = self._headers_buf
self._headers_buf = None
if TYPE_CHECKING:
# Safe because callers (write() and write_eof()) only invoke this method
# after checking that self._headers_buf is truthy
assert headers_buf is not None
if not self.chunked:
# Non-chunked: coalesce headers with body
if chunk:
self._writelines((headers_buf, chunk))
else:
self._write(headers_buf)
return
# Coalesce headers with chunked data
if chunk:
chunk_len_pre = f"{len(chunk):x}\r\n".encode("ascii")
if is_eof:
self._writelines((headers_buf, chunk_len_pre, chunk, b"\r\n0\r\n\r\n"))
else:
self._writelines((headers_buf, chunk_len_pre, chunk, b"\r\n"))
elif is_eof:
self._writelines((headers_buf, b"0\r\n\r\n"))
else:
self._write(headers_buf)
async def write(
self,
chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"],
*,
drain: bool = True,
LIMIT: int = 0x10000,
) -> None:
"""
Writes chunk of data to a stream.
write_eof() indicates end of stream.
writer can't be used after write_eof() method being called.
write() return drain future.
"""
if self._on_chunk_sent is not None:
await self._on_chunk_sent(chunk)
if isinstance(chunk, memoryview):
if chunk.nbytes != len(chunk):
# just reshape it
chunk = chunk.cast("c")
if self._compress is not None:
chunk = await self._compress.compress(chunk)
if not chunk:
return
if self.length is not None:
chunk_len = len(chunk)
if self.length >= chunk_len:
self.length = self.length - chunk_len
else:
chunk = chunk[: self.length]
self.length = 0
if not chunk:
return
# Handle buffered headers for small payload optimization
if self._headers_buf and not self._headers_written:
self._send_headers_with_payload(chunk, False)
if drain and self.buffer_size > LIMIT:
self.buffer_size = 0
await self.drain()
return
if chunk:
if self.chunked:
self._write_chunked_payload(chunk)
else:
self._write(chunk)
if drain and self.buffer_size > LIMIT:
self.buffer_size = 0
await self.drain()
async def write_headers(
self, status_line: str, headers: "CIMultiDict[str]"
) -> None:
"""Write headers to the stream."""
if self._on_headers_sent is not None:
await self._on_headers_sent(headers)
# status + headers
buf = _serialize_headers(status_line, headers)
self._headers_written = False
self._headers_buf = buf
def send_headers(self) -> None:
"""Force sending buffered headers if not already sent."""
if not self._headers_buf or self._headers_written:
return
self._headers_written = True
headers_buf = self._headers_buf
self._headers_buf = None
if TYPE_CHECKING:
# Safe because we only enter this block when self._headers_buf is truthy
assert headers_buf is not None
self._write(headers_buf)
def set_eof(self) -> None:
"""Indicate that the message is complete."""
if self._eof:
return
# If headers haven't been sent yet, send them now
# This handles the case where there's no body at all
if self._headers_buf and not self._headers_written:
self._headers_written = True
headers_buf = self._headers_buf
self._headers_buf = None
if TYPE_CHECKING:
# Safe because we only enter this block when self._headers_buf is truthy
assert headers_buf is not None
# Combine headers and chunked EOF marker in a single write
if self.chunked:
self._writelines((headers_buf, b"0\r\n\r\n"))
else:
self._write(headers_buf)
elif self.chunked and self._headers_written:
# Headers already sent, just send the final chunk marker
self._write(b"0\r\n\r\n")
self._eof = True
async def write_eof(self, chunk: bytes = b"") -> None:
if self._eof:
return
if chunk and self._on_chunk_sent is not None:
await self._on_chunk_sent(chunk)
# Handle body/compression
if self._compress:
chunks: list[bytes] = []
chunks_len = 0
if chunk and (compressed_chunk := await self._compress.compress(chunk)):
chunks_len = len(compressed_chunk)
chunks.append(compressed_chunk)
flush_chunk = self._compress.flush()
chunks_len += len(flush_chunk)
chunks.append(flush_chunk)
assert chunks_len
# Send buffered headers with compressed data if not yet sent
if self._headers_buf and not self._headers_written:
self._headers_written = True
headers_buf = self._headers_buf
self._headers_buf = None
if self.chunked:
# Coalesce headers with compressed chunked data
chunk_len_pre = f"{chunks_len:x}\r\n".encode("ascii")
self._writelines(
(headers_buf, chunk_len_pre, *chunks, b"\r\n0\r\n\r\n")
)
else:
# Coalesce headers with compressed data
self._writelines((headers_buf, *chunks))
await self.drain()
self._eof = True
return
# Headers already sent, just write compressed data
if self.chunked:
chunk_len_pre = f"{chunks_len:x}\r\n".encode("ascii")
self._writelines((chunk_len_pre, *chunks, b"\r\n0\r\n\r\n"))
elif len(chunks) > 1:
self._writelines(chunks)
else:
self._write(chunks[0])
await self.drain()
self._eof = True
return
# No compression - send buffered headers if not yet sent
if self._headers_buf and not self._headers_written:
# Use helper to send headers with payload
self._send_headers_with_payload(chunk, True)
await self.drain()
self._eof = True
return
# Handle remaining body
if self.chunked:
if chunk:
# Write final chunk with EOF marker
self._writelines(
(f"{len(chunk):x}\r\n".encode("ascii"), chunk, b"\r\n0\r\n\r\n")
)
else:
self._write(b"0\r\n\r\n")
await self.drain()
self._eof = True
return
if chunk:
self._write(chunk)
await self.drain()
self._eof = True
async def drain(self) -> None:
"""Flush the write buffer.
The intended use is to write
await w.write(data)
await w.drain()
"""
protocol = self._protocol
if protocol.transport is not None and protocol._paused:
await protocol._drain_helper()
def _safe_header(string: str) -> str:
if "\r" in string or "\n" in string:
raise ValueError(
"Newline or carriage return detected in headers. "
"Potential header injection attack."
)
return string
def _py_serialize_headers(status_line: str, headers: "CIMultiDict[str]") -> bytes:
headers_gen = (_safe_header(k) + ": " + _safe_header(v) for k, v in headers.items())
line = status_line + "\r\n" + "\r\n".join(headers_gen) + "\r\n\r\n"
return line.encode("utf-8")
_serialize_headers = _py_serialize_headers
try:
import aiohttp._http_writer as _http_writer # type: ignore[import-not-found]
_c_serialize_headers = _http_writer._serialize_headers
if not NO_EXTENSIONS:
_serialize_headers = _c_serialize_headers
except ImportError:
pass
| StreamWriter |
python | keras-team__keras | keras/src/ops/operation_test.py | {
"start": 244,
"end": 571
} | class ____(operation.Operation):
def call(self, x, y, z=None):
# `z` has to be put first due to the order of operations issue with
# torch backend.
return 3 * z + x + 2 * y
def compute_output_spec(self, x, y, z=None):
return keras_tensor.KerasTensor(x.shape, x.dtype)
| OpWithMultipleInputs |
python | doocs__leetcode | solution/2000-2099/2063.Vowels of All Substrings/Solution.py | {
"start": 0,
"end": 165
} | class ____:
def countVowels(self, word: str) -> int:
n = len(word)
return sum((i + 1) * (n - i) for i, c in enumerate(word) if c in 'aeiou')
| Solution |
python | mlflow__mlflow | mlflow/entities/logged_model_input.py | {
"start": 127,
"end": 720
} | class ____(_MlflowObject):
"""ModelInput object associated with a Run."""
def __init__(self, model_id: str):
self._model_id = model_id
def __eq__(self, other: _MlflowObject) -> bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
@property
def model_id(self) -> str:
"""Model ID."""
return self._model_id
def to_proto(self):
return ProtoModelInput(model_id=self._model_id)
@classmethod
def from_proto(cls, proto):
return cls(proto.model_id)
| LoggedModelInput |
python | ipython__ipython | IPython/core/magic.py | {
"start": 27631,
"end": 29162
} | class ____:
"""An alias to another magic function.
An alias is determined by its magic name and magic kind. Lookup
is done at call time, so if the underlying magic changes the alias
will call the new function.
Use the :meth:`MagicsManager.register_alias` method or the
`%alias_magic` magic function to create and register a new alias.
"""
def __init__(self, shell, magic_name, magic_kind, magic_params=None):
self.shell = shell
self.magic_name = magic_name
self.magic_params = magic_params
self.magic_kind = magic_kind
self.pretty_target = "%s%s" % (magic_escapes[self.magic_kind], self.magic_name)
self.__doc__ = "Alias for `%s`." % self.pretty_target
self._in_call = False
def __call__(self, *args, **kwargs):
"""Call the magic alias."""
fn = self.shell.find_magic(self.magic_name, self.magic_kind)
if fn is None:
raise UsageError("Magic `%s` not found." % self.pretty_target)
# Protect against infinite recursion.
if self._in_call:
raise UsageError(
"Infinite recursion detected; magic aliases cannot call themselves."
)
self._in_call = True
try:
if self.magic_params:
args_list = list(args)
args_list[0] = self.magic_params + " " + args[0]
args = tuple(args_list)
return fn(*args, **kwargs)
finally:
self._in_call = False
| MagicAlias |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/source.py | {
"start": 18949,
"end": 21636
} | class ____(Oauth2Authenticator):
"""Request example for API token extraction:
curl --location --request POST
https://oauth2.googleapis.com/token?grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=signed_JWT
"""
def __init__(self, config: Mapping):
self.credentials_json = json.loads(config["credentials_json"])
self.client_email = self.credentials_json["client_email"]
self.scope = "https://www.googleapis.com/auth/analytics.readonly"
super().__init__(
token_refresh_endpoint="https://oauth2.googleapis.com/token",
client_secret=self.credentials_json["private_key"],
client_id=self.credentials_json["private_key_id"],
refresh_token=None,
)
def refresh_access_token(self) -> Tuple[str, int]:
"""
Calling the Google OAuth 2.0 token endpoint. Used for authorizing signed JWT.
Returns tuple with access token and token's time-to-live
"""
response_json = None
try:
response = requests.request(method="POST", url=self.token_refresh_endpoint, params=self.get_refresh_request_params())
response_json = response.json()
response.raise_for_status()
except requests.exceptions.RequestException as e:
if response_json and "error" in response_json:
raise Exception(
"Error refreshing access token {}. Error: {}; Error details: {}; Exception: {}".format(
response_json, response_json["error"], response_json["error_description"], e
)
) from e
raise Exception(f"Error refreshing access token: {e}") from e
else:
return response_json["access_token"], response_json["expires_in"]
def get_refresh_request_params(self) -> Mapping[str, Any]:
"""
Sign the JWT with RSA-256 using the private key found in service account JSON file.
"""
token_lifetime = 3600 # token lifetime is 1 hour
issued_at = time.time()
expiration_time = issued_at + token_lifetime
payload = {
"iss": self.client_email,
"sub": self.client_email,
"scope": self.scope,
"aud": self.token_refresh_endpoint,
"iat": issued_at,
"exp": expiration_time,
}
headers = {"kid": self.client_id}
signed_jwt = jwt.encode(payload, self.client_secret, headers=headers, algorithm="RS256")
return {"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer", "assertion": str(signed_jwt)}
| GoogleAnalyticsServiceOauth2Authenticator |
python | huggingface__transformers | src/transformers/models/unispeech/modeling_unispeech.py | {
"start": 22974,
"end": 26020
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=attention_mask,
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
| UniSpeechEncoderStableLayerNorm |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py | {
"start": 4336,
"end": 4681
} | class ____:
"""Test EcsExecutorException class."""
def test_ecs_executor_exception_creation(self):
"""Test EcsExecutorException creation."""
exception = EcsExecutorException("Test error message")
assert str(exception) == "Test error message"
assert isinstance(exception, Exception)
| TestEcsExecutorException |
python | huggingface__transformers | src/transformers/models/glm4/modeling_glm4.py | {
"start": 19504,
"end": 22953
} | class ____(Glm4PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Glm4Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Glm4ForCausalLM
>>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-0414")
>>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-0414")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Glm4ForCausalLM |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto_tab_active.py | {
"start": 112,
"end": 1361
} | class ____(App):
BINDINGS = [("space", "focus_button_2_2", "Focus button 2.2")]
def compose(self) -> ComposeResult:
with TabbedContent(id="tabbed-root"):
with TabPane("[ansi_red]Parent 1[/]"):
with TabbedContent():
with TabPane("[ansi_red]Child 1.1[/]"):
yield Button("Button 1.1", variant="error")
with TabPane("[ansi_red]Child 1.2[/]"):
yield Button("Button 1.2", variant="error")
with TabPane("[ansi_green]Parent 2[/]", id="parent-2"):
with TabbedContent(id="tabbed-parent-2"):
with TabPane("[ansi_green]Child 2.1[/]"):
yield Button("Button 2.1", variant="success")
with TabPane("[ansi_green]Child 2.2[/]", id="child-2-2"):
yield Button(
"Button 2.2",
variant="success",
id="button-2-2",
)
yield Footer()
def action_focus_button_2_2(self) -> None:
self.query_one("#button-2-2", Button).focus()
if __name__ == "__main__":
app = ExampleApp()
app.run()
| ExampleApp |
python | pennersr__django-allauth | allauth/socialaccount/providers/tumblr_oauth2/views.py | {
"start": 181,
"end": 1154
} | class ____(OAuth2Adapter):
provider_id = "tumblr_oauth2"
access_token_url = "https://api.tumblr.com/v2/oauth2/token" # nosec: B105
authorize_url = "https://www.tumblr.com/oauth2/authorize"
profile_url = "https://api.tumblr.com/v2/user/info"
def complete_login(self, request, app, token, response):
extra_data = self.get_user_info(token)
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_user_info(self, token):
resp = (
get_adapter()
.get_requests_session()
.get(
self.profile_url,
headers={"Authorization": "Bearer " + token.token},
)
)
resp.raise_for_status()
extra_data = resp.json()["response"]["user"]
return extra_data
oauth2_login = OAuth2LoginView.adapter_view(TumblrOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(TumblrOAuth2Adapter)
| TumblrOAuth2Adapter |
python | kamyu104__LeetCode-Solutions | Python/random-pick-index.py | {
"start": 605,
"end": 1102
} | class ____(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.__nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
reservoir = -1
n = 0
for i in xrange(len(self.__nums)):
if self.__nums[i] != target:
continue
reservoir = i if randint(1, n+1) == 1 else reservoir
n += 1
return reservoir
| Solution_TLE |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.