language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scrapy__scrapy | tests/test_command_runspider.py | {
"start": 10070,
"end": 10765
} | class ____(scrapy.Spider):
name = 'myspider'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
spider.settings.set("FOO", kwargs.get("foo"))
return spider
async def start(self):
self.logger.info(f"The value of FOO is {self.settings.getint('FOO')}")
return
yield
"""
args = ["-a", "foo=42"]
log = self.get_log(tmp_path, spider_code, args=args)
assert "Spider closed (finished)" in log
assert "The value of FOO is 42" in log
@pytest.mark.skipif(
platform.system() != "Windows", reason="Windows required for .pyw files"
)
| MySpider |
python | hynek__structlog | tests/test_twisted.py | {
"start": 8173,
"end": 8373
} | class ____:
def test_repr(self):
"""
The repr of the wrapped string is the vanilla string without quotes.
"""
assert "foo" == repr(ReprWrapper("foo"))
| TestReprWrapper |
python | facebookresearch__faiss | tests/test_index_accuracy.py | {
"start": 10562,
"end": 12742
} | class ____(unittest.TestCase):
def subtest_8bit_direct(self, metric_type, d, quantizer_type):
xt, xb, xq = get_dataset_2(d, 500, 1000, 30)
# rescale everything to get integer
tmin, tmax = xt.min(), xt.max()
def rescale(x):
x = np.floor((x - tmin) * 256 / (tmax - tmin))
x[x < 0] = 0
x[x > 255] = 255
return x
def rescale_signed(x):
x = np.floor((x - tmin) * 256 / (tmax - tmin))
x[x < 0] = 0
x[x > 255] = 255
x -= 128
return x
if quantizer_type == faiss.ScalarQuantizer.QT_8bit_direct_signed:
xt = rescale_signed(xt)
xb = rescale_signed(xb)
xq = rescale_signed(xq)
else:
xt = rescale(xt)
xb = rescale(xb)
xq = rescale(xq)
gt_index = faiss.IndexFlat(d, metric_type)
gt_index.add(xb)
Dref, Iref = gt_index.search(xq, 10)
index = faiss.IndexScalarQuantizer(
d, quantizer_type, metric_type
)
index.add(xb)
D, I = index.search(xq, 10)
assert np.all(I == Iref)
assert np.all(D == Dref)
# same, with IVF
nlist = 64
quantizer = faiss.IndexFlat(d, metric_type)
gt_index = faiss.IndexIVFFlat(quantizer, d, nlist, metric_type)
gt_index.nprobe = 4
gt_index.train(xt)
gt_index.add(xb)
Dref, Iref = gt_index.search(xq, 10)
index = faiss.IndexIVFScalarQuantizer(
quantizer, d, nlist, quantizer_type,
metric_type
)
index.nprobe = 4
index.by_residual = False
index.train(xt)
index.add(xb)
D, I = index.search(xq, 10)
assert np.all(I == Iref)
assert np.all(D == Dref)
def test_8bit_direct(self):
for quantizer in faiss.ScalarQuantizer.QT_8bit_direct, faiss.ScalarQuantizer.QT_8bit_direct_signed:
for d in 13, 16, 24:
for metric_type in faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT:
self.subtest_8bit_direct(metric_type, d, quantizer)
| TestSQByte |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v2.py | {
"start": 11658,
"end": 13899
} | class ____(Initializer):
"""Initializer that generates a truncated normal distribution.
Also available via the shortcut function
`tf.keras.initializers.truncated_normal`.
The values generated are similar to values from a
`tf.keras.initializers.RandomNormal` initializer except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate before truncation.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values (truncated).
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
| TruncatedNormal |
python | sqlalchemy__sqlalchemy | test/orm/test_selectin_relations.py | {
"start": 65805,
"end": 70779
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Company(Base):
__tablename__ = "company"
id = Column(Integer, primary_key=True)
name = Column(String(50))
employees = relationship("Employee", order_by="Employee.id")
class Employee(Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
type = Column(String(50))
name = Column(String(50))
company_id = Column(ForeignKey("company.id"))
__mapper_args__ = {
"polymorphic_on": "type",
"with_polymorphic": "*",
}
class Programmer(Employee):
__tablename__ = "programmer"
id = Column(ForeignKey("employee.id"), primary_key=True)
languages = relationship("Language")
__mapper_args__ = {"polymorphic_identity": "programmer"}
class Manager(Employee):
__tablename__ = "manager"
id = Column(ForeignKey("employee.id"), primary_key=True)
golf_swing_id = Column(ForeignKey("golf_swing.id"))
golf_swing = relationship("GolfSwing")
__mapper_args__ = {"polymorphic_identity": "manager"}
class Language(Base):
__tablename__ = "language"
id = Column(Integer, primary_key=True)
programmer_id = Column(
Integer, ForeignKey("programmer.id"), nullable=False
)
name = Column(String(50))
class GolfSwing(Base):
__tablename__ = "golf_swing"
id = Column(Integer, primary_key=True)
name = Column(String(50))
@classmethod
def insert_data(cls, connection):
Company, Programmer, Manager, GolfSwing, Language = cls.classes(
"Company", "Programmer", "Manager", "GolfSwing", "Language"
)
c1 = Company(
id=1,
name="Foobar Corp",
employees=[
Programmer(
id=1, name="p1", languages=[Language(id=1, name="Python")]
),
Manager(id=2, name="m1", golf_swing=GolfSwing(name="fore")),
],
)
c2 = Company(
id=2,
name="bat Corp",
employees=[
Manager(id=3, name="m2", golf_swing=GolfSwing(name="clubs")),
Programmer(
id=4, name="p2", languages=[Language(id=2, name="Java")]
),
],
)
sess = Session(connection)
sess.add_all([c1, c2])
sess.commit()
def test_one_to_many(self):
Company, Programmer, Manager, GolfSwing, Language = self.classes(
"Company", "Programmer", "Manager", "GolfSwing", "Language"
)
sess = fixture_session()
company = (
sess.query(Company)
.filter(Company.id == 1)
.options(
selectinload(
Company.employees.of_type(Programmer)
).selectinload(Programmer.languages)
)
.one()
)
def go():
eq_(company.employees[0].languages[0].name, "Python")
self.assert_sql_count(testing.db, go, 0)
def test_many_to_one(self):
Company, Programmer, Manager, GolfSwing, Language = self.classes(
"Company", "Programmer", "Manager", "GolfSwing", "Language"
)
sess = fixture_session()
company = (
sess.query(Company)
.filter(Company.id == 2)
.options(
selectinload(Company.employees.of_type(Manager)).selectinload(
Manager.golf_swing
)
)
.one()
)
# NOTE: we *MUST* do a SQL compare on this one because the adaption
# is very sensitive
def go():
eq_(company.employees[0].golf_swing.name, "clubs")
self.assert_sql_count(testing.db, go, 0)
def test_both(self):
Company, Programmer, Manager, GolfSwing, Language = self.classes(
"Company", "Programmer", "Manager", "GolfSwing", "Language"
)
sess = fixture_session()
rows = (
sess.query(Company)
.options(
selectinload(Company.employees.of_type(Manager)).selectinload(
Manager.golf_swing
),
defaultload(
Company.employees.of_type(Programmer)
).selectinload(Programmer.languages),
)
.order_by(Company.id)
.all()
)
def go():
eq_(rows[0].employees[0].languages[0].name, "Python")
eq_(rows[1].employees[0].golf_swing.name, "clubs")
self.assert_sql_count(testing.db, go, 0)
| HeterogeneousSubtypesTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py | {
"start": 1374,
"end": 1476
} | class ____:
def __aiter__(self) -> typing.AsyncIterable[int]:
...
| TypingAsyncIterableTReturn |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifact_collection.py | {
"start": 259,
"end": 361
} | class ____(GQLResult):
project: Optional[ProjectArtifactCollectionProject]
| ProjectArtifactCollection |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp-pyspark/dagster_gcp_pyspark/bigquery/bigquery_pyspark_type_handler.py | {
"start": 1131,
"end": 7348
} | class ____(DbTypeHandler[DataFrame]):
"""Plugin for the BigQuery I/O Manager that can store and load PySpark DataFrames as BigQuery tables.
Examples:
.. code-block:: python
from dagster_gcp import BigQueryIOManager
from dagster_bigquery_pandas import BigQueryPySparkTypeHandler
from dagster import Definitions, EnvVar
class MyBigQueryIOManager(BigQueryIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [BigQueryPySparkTypeHandler()]
@asset(
key_prefix=["my_dataset"], # my_dataset will be used as the dataset in BigQuery
)
def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={
"io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))
}
)
"""
def handle_output( # pyright: ignore[reportIncompatibleMethodOverride]
self, context: OutputContext, table_slice: TableSlice, obj: DataFrame, _
) -> Mapping[str, RawMetadataValue]:
options = _get_bigquery_write_options(context.resource_config, table_slice)
with_uppercase_cols = obj.toDF(*[c.upper() for c in obj.columns])
with_uppercase_cols.write.format("bigquery").options(**options).mode("append").save()
return {
"dataframe_columns": MetadataValue.table_schema(
TableSchema(
columns=[
TableColumn(name=field.name, type=field.dataType.typeName())
for field in obj.schema.fields
]
)
),
}
def load_input(self, context: InputContext, table_slice: TableSlice, _) -> DataFrame: # pyright: ignore[reportIncompatibleMethodOverride]
options = _get_bigquery_read_options(table_slice)
spark = SparkSession.builder.getOrCreate() # type: ignore
if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:
return spark.createDataFrame([], StructType([]))
df = (
spark.read.format("bigquery")
.options(**options)
.load(BigQueryClient.get_select_statement(table_slice))
)
return df.toDF(*[c.lower() for c in df.columns])
@property
def supported_types(self):
return [DataFrame]
bigquery_pyspark_io_manager = build_bigquery_io_manager(
[BigQueryPySparkTypeHandler()], default_load_type=DataFrame
)
bigquery_pyspark_io_manager.__doc__ = """
An I/O manager definition that reads inputs from and writes PySpark DataFrames to BigQuery.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_gcp_pyspark import bigquery_pyspark_io_manager
from dagster import Definitions
@asset(
key_prefix=["my_dataset"], # will be used as the dataset in BigQuery
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={
"io_manager": bigquery_pyspark_io_manager.configured({
"project": {"env": "GCP_PROJECT"}
})
}
)
You can set a default dataset to store the assets using the ``dataset`` configuration value of the BigQuery I/O
Manager. This dataset will be used if no other dataset is specified directly on an asset or op.
.. code-block:: python
Definitions(
assets=[my_table],
resources={
"io_manager": bigquery_pyspark_io_manager.configured({
"project": {"env": "GCP_PROJECT"},
"dataset": "my_dataset"
})
}
)
On individual assets, you an also specify the dataset where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_dataset"] # will be used as the dataset in BigQuery
)
def my_table() -> pyspark.sql.DataFrame:
...
@asset(
# note that the key needs to be "schema"
metadata={"schema": "my_dataset"} # will be used as the dataset in BigQuery
)
def my_other_table() -> pyspark.sql.DataFrame:
...
For ops, the dataset can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pyspark.sql.DataFrame:
...
If none of these is provided, the dataset will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
# my_table will just contain the data from column "a"
...
If you cannot upload a file to your Dagster deployment, or otherwise cannot
`authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_
via a standard method, you can provide a service account key as the "gcp_credentials" configuration.
Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.
After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be
unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve
the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64
"""
| BigQueryPySparkTypeHandler |
python | django__django | tests/queries/models.py | {
"start": 9494,
"end": 9714
} | class ____(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveIntegerField()
def __str__(self):
return self.name
| ObjectB |
python | pytorch__pytorch | torch/distributions/independent.py | {
"start": 372,
"end": 5019
} | class ____(Distribution, Generic[D]):
r"""
Reinterprets some of the batch dims of a distribution as event dims.
This is mainly useful for changing the shape of the result of
:meth:`log_prob`. For example to create a diagonal Normal distribution with
the same shape as a Multivariate Normal distribution (so they are
interchangeable), you can::
>>> from torch.distributions.multivariate_normal import MultivariateNormal
>>> from torch.distributions.normal import Normal
>>> loc = torch.zeros(3)
>>> scale = torch.ones(3)
>>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
>>> [mvn.batch_shape, mvn.event_shape]
[torch.Size([]), torch.Size([3])]
>>> normal = Normal(loc, scale)
>>> [normal.batch_shape, normal.event_shape]
[torch.Size([3]), torch.Size([])]
>>> diagn = Independent(normal, 1)
>>> [diagn.batch_shape, diagn.event_shape]
[torch.Size([]), torch.Size([3])]
Args:
base_distribution (torch.distributions.distribution.Distribution): a
base distribution
reinterpreted_batch_ndims (int): the number of batch dims to
reinterpret as event dims
"""
arg_constraints: dict[str, constraints.Constraint] = {}
base_dist: D
def __init__(
self,
base_distribution: D,
reinterpreted_batch_ndims: int,
validate_args: Optional[bool] = None,
) -> None:
if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
raise ValueError(
"Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}"
)
shape: Size = base_distribution.batch_shape + base_distribution.event_shape
event_dim: int = reinterpreted_batch_ndims + len(base_distribution.event_shape)
batch_shape = shape[: len(shape) - event_dim]
event_shape = shape[len(shape) - event_dim :]
self.base_dist = base_distribution
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
super().__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Independent, _instance)
batch_shape = torch.Size(batch_shape)
new.base_dist = self.base_dist.expand(
batch_shape + self.event_shape[: self.reinterpreted_batch_ndims]
)
new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
super(Independent, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
@property
def has_rsample(self) -> bool: # type: ignore[override]
return self.base_dist.has_rsample
@property
def has_enumerate_support(self) -> bool: # type: ignore[override]
if self.reinterpreted_batch_ndims > 0:
return False
return self.base_dist.has_enumerate_support
@constraints.dependent_property
# pyrefly: ignore [bad-override]
def support(self):
result = self.base_dist.support
if self.reinterpreted_batch_ndims:
result = constraints.independent(result, self.reinterpreted_batch_ndims)
return result
@property
def mean(self) -> Tensor:
return self.base_dist.mean
@property
def mode(self) -> Tensor:
return self.base_dist.mode
@property
def variance(self) -> Tensor:
return self.base_dist.variance
def sample(self, sample_shape=torch.Size()) -> Tensor:
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape: _size = torch.Size()) -> Tensor:
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
log_prob = self.base_dist.log_prob(value)
return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)
def entropy(self):
entropy = self.base_dist.entropy()
return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)
def enumerate_support(self, expand=True):
if self.reinterpreted_batch_ndims > 0:
raise NotImplementedError(
"Enumeration over cartesian product is not implemented"
)
return self.base_dist.enumerate_support(expand=expand)
def __repr__(self):
return (
self.__class__.__name__
+ f"({self.base_dist}, {self.reinterpreted_batch_ndims})"
)
| Independent |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 7947,
"end": 8033
} | class ____(DatetimeCmpOp):
key = operator.eq
@infer_global(operator.ne)
| DatetimeCmpEq |
python | fastai__fastai | fastai/optimizer.py | {
"start": 3731,
"end": 15942
} | class ____(_BaseOptimizer):
"Base optimizer class for the fastai library, updating `params` with `cbs`"
_keep_on_clear = ['force_train', 'do_wd']
def __init__(self,
params:Tensor|Iterable, # Model parameters
cbs:Callable|MutableSequence, # `Optimizer` step callbacks
**defaults # Hyper parameters default values
):
if 'train_bn' in defaults.keys():
_ = defaults.pop('train_bn')
warn('Setting `train_bn` in `Optimizer` has no effect. Set `train_bn` on `Learner` init instead')
params = L(params)
self.cbs,self.state = L(cbs),defaultdict(dict)
defaults = merge(*self.cbs.attrgot('defaults'), defaults)
self.param_lists = L(L(p) for p in params) if isinstance(params[0], (L,list)) else L([params])
self.hypers = L({} for _ in range_of(self.param_lists))
self.set_hypers(**defaults)
self.frozen_idx = 0
def zero_grad(self):
for p,*_ in self.all_params(with_grad=True):
p.grad.detach_()
p.grad.zero_()
def step(self, closure=None):
if closure is not None: raise NotImplementedError("fastai optimizers currently do not support closure")
for p,pg,state,hyper in self.all_params(with_grad=True):
for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper}))
self.state[p] = state
def clear_state(self):
for p,pg,state,hyper in self.all_params():
self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state}
def state_dict(self):
state = [self.state[p] for p,*_ in self.all_params()]
return {'state': state, 'hypers': self.hypers}
def load_state_dict(self,
sd:dict # State dict with `hypers` and `state` to load on the optimizer
):
assert len(sd["hypers"]) == len(self.param_lists)
assert len(sd["state"]) == sum([len(pg) for pg in self.param_lists])
self.hypers = sd['hypers']
self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])}
# %% ../nbs/12_optimizer.ipynb 21
def sgd_step(p, lr, **kwargs):
p.data.add_(p.grad.data, alpha=-lr)
# %% ../nbs/12_optimizer.ipynb 24
def weight_decay(p, lr, wd, do_wd=True, **kwargs):
"Weight decay as decaying `p` with `lr*wd`"
if do_wd and wd!=0: p.data.mul_(1 - lr*wd)
weight_decay.defaults = dict(wd=0.)
# %% ../nbs/12_optimizer.ipynb 26
def l2_reg(p, lr, wd, do_wd=True, **kwargs):
"L2 regularization as adding `wd*p` to `p.grad`"
if do_wd and wd!=0: p.grad.data.add_(p.data, alpha=wd)
l2_reg.defaults = dict(wd=0.)
# %% ../nbs/12_optimizer.ipynb 41
def average_grad(p, mom, dampening=False, grad_avg=None, **kwargs):
"Keeps track of the avg grads of `p` in `state` with `mom`."
if grad_avg is None: grad_avg = torch.zeros_like(p.grad.data)
damp = 1-mom if dampening else 1.
grad_avg.mul_(mom).add_(p.grad.data, alpha=damp)
return {'grad_avg': grad_avg}
average_grad.defaults = dict(mom=0.9)
# %% ../nbs/12_optimizer.ipynb 44
def average_sqr_grad(p, sqr_mom, dampening=True, sqr_avg=None, **kwargs):
if sqr_avg is None: sqr_avg = torch.zeros_like(p.grad.data)
damp = 1-sqr_mom if dampening else 1.
sqr_avg.mul_(sqr_mom).addcmul_(p.grad.data, p.grad.data, value=damp)
return {'sqr_avg': sqr_avg}
average_sqr_grad.defaults = dict(sqr_mom=0.99)
# %% ../nbs/12_optimizer.ipynb 62
def momentum_step(p, lr, grad_avg, **kwargs):
"Step for SGD with momentum with `lr`"
p.data.add_(grad_avg, alpha=-lr)
# %% ../nbs/12_optimizer.ipynb 63
def SGD(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0., # Gradient moving average (β1) coefficient
wd:Real=0., # Optional weight decay (true or L2)
decouple_wd:bool=True # Apply true weight decay or L2 regularization (SGD)
) -> Optimizer:
"A SGD `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom != 0: cbs.append(average_grad)
cbs.append(sgd_step if mom==0 else momentum_step)
return Optimizer(params, cbs, lr=lr, mom=mom, wd=wd)
# %% ../nbs/12_optimizer.ipynb 70
def rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):
"Step for RMSProp with momentum with `lr`"
denom = sqr_avg.sqrt().add_(eps)
p.data.addcdiv_((grad_avg if grad_avg is not None else p.grad), denom, value=-lr)
rms_prop_step.defaults = dict(eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 71
def RMSProp(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0., # Gradient moving average (β1) coefficient
sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
eps:float=1e-8, # Added for numerical stability
wd:Real=0., # Optional weight decay (true or L2)
decouple_wd:bool=True # Apply true weight decay or L2 regularization (RMSProp)
) -> Optimizer:
"A RMSProp `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += ([average_sqr_grad] if mom==0. else [average_grad, average_sqr_grad])
cbs.append(rms_prop_step)
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, wd=wd)
# %% ../nbs/12_optimizer.ipynb 76
def step_stat(p, step=0, **kwargs):
"Register the number of steps done in `state` for `p`"
step += 1
return {'step' : step}
# %% ../nbs/12_optimizer.ipynb 78
def debias(mom, damp, step): return damp * (1 - mom**step) / (1-mom)
# %% ../nbs/12_optimizer.ipynb 79
def adam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for Adam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(grad_avg, (sqr_avg/debias2).sqrt() + eps, value = -lr / debias1)
return p
adam_step._defaults = dict(eps=1e-5)
# %% ../nbs/12_optimizer.ipynb 80
def Adam(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0.9, # Gradient moving average (β1) coefficient
sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
eps:float=1e-5, # Added for numerical stability
wd:Real=0.01, # Optional weight decay (true or L2)
decouple_wd:bool=True # Apply true weight decay (AdamW) or L2 regularization (Adam)
) -> Optimizer:
"A Adam/AdamW `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 85
def radam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, beta, **kwargs):
"Step for RAdam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r_inf = 2/(1-sqr_mom) - 1
r = r_inf - 2*step*sqr_mom**step/(1-sqr_mom**step)
if r > 5:
v = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))
denom = (sqr_avg/debias2).sqrt()
if eps: denom += eps
if beta: denom = F.softplus(denom, beta)
p.data.addcdiv_(grad_avg, denom, value = -lr*v / debias1)
else: p.data.add_(grad_avg, alpha=-lr / debias1)
return p
radam_step._defaults = dict(eps=1e-5)
# %% ../nbs/12_optimizer.ipynb 86
def RAdam(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0.9, # Gradient moving average (β1) coefficient
sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
eps:float=1e-5, # Added for numerical stability
wd:Real=0., # Optional weight decay (true or L2)
beta:float=0., # Set to enable SAdam
decouple_wd:bool=True # Apply true weight decay (RAdamW) or L2 regularization (RAdam)
) -> Optimizer:
"A RAdam/RAdamW `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, radam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd, beta=beta)
# %% ../nbs/12_optimizer.ipynb 92
def qhadam_step(p, lr, mom, sqr_mom, sqr_avg, nu_1, nu_2, step, grad_avg, eps, **kwargs):
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(((1-nu_1) * p.grad.data) + (nu_1 * (grad_avg / debias1)),
(((1 - nu_2) * (p.grad.data)**2) + (nu_2 * (sqr_avg / debias2))).sqrt() + eps,
value = -lr)
return p
qhadam_step._defaults = dict(eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 93
def QHAdam(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0.999, # Gradient moving average (β1) coefficient
sqr_mom:float=0.999, # Gradient squared moving average (β2) coefficient
nu_1:float=0.7, # QH immediate discount factor
nu_2:float=1.0, # QH momentum discount factor
eps:float=1e-8, # Added for numerical stability
wd:Real=0., # Optional weight decay (true or L2)
decouple_wd:bool=True, # Apply true weight decay (QHAdamW) or L2 regularization (QHAdam)
) -> Optimizer:
"A QHAdam/QHAdamW `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), partial(average_sqr_grad, dampening=True), step_stat, qhadam_step]
return Optimizer(params, cbs, lr=lr, nu_1=nu_1, nu_2=nu_2 ,
mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 96
def larc_layer_lr(p, lr, trust_coeff, wd, eps, clip=True, **kwargs):
"Computes the local lr before weight decay is applied"
p_norm,g_norm = torch.norm(p.data),torch.norm(p.grad.data)
local_lr = lr*trust_coeff * (p_norm) / (g_norm + p_norm * wd + eps)
return {'local_lr': min(lr, local_lr) if clip else local_lr}
larc_layer_lr.defaults = dict(trust_coeff=0.02, wd=0., eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 97
def larc_step(p, local_lr, grad_avg=None, **kwargs):
"Step for LARC `local_lr` on `p`"
p.data.add_(p.grad.data if grad_avg is None else grad_avg, alpha = -local_lr)
# %% ../nbs/12_optimizer.ipynb 98
def Larc(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0.9, # Gradient moving average (β1) coefficient
clip:bool=True, # LARC if clip=True, LARS if clip=False
trust_coeff:float=0.02, # Trust coeffiecnet for calculating layerwise LR
eps:float=1e-8, # Added for numerical stability
wd:Real=0., # Optional weight decay (true or L2)
decouple_wd:bool=True # Apply true weight decay or L2 regularization
) -> Optimizer:
"A LARC/LARS `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom!=0.: cbs.append(average_grad)
cbs += [partial(larc_layer_lr, clip=clip), larc_step]
return Optimizer(params, cbs, lr=lr, mom=mom, trust_coeff=trust_coeff, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 103
def lamb_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for LAMB with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r1 = p.data.pow(2).mean().sqrt()
step = (grad_avg/debias1) / ((sqr_avg/debias2).sqrt()+eps)
r2 = step.pow(2).mean().sqrt()
q = 1 if r1 == 0 or r2 == 0 else min(r1/r2,10)
p.data.add_(step, alpha = -lr * q)
lamb_step._defaults = dict(eps=1e-6, wd=0.)
# %% ../nbs/12_optimizer.ipynb 104
def Lamb(
params:Tensor|Iterable, # Model parameters
lr:float|slice, # Default learning rate
mom:float=0.9, # Gradient moving average (β1) coefficient
sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
eps:float=1e-5, # Added for numerical stability
wd:Real=0., # Optional weight decay (true or L2)
decouple_wd:bool=True # Apply true weight decay or L2 regularization
) -> Optimizer:
"A LAMB `Optimizer`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, lamb_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 109
| Optimizer |
python | getsentry__sentry | src/sentry/features/manager.py | {
"start": 936,
"end": 5070
} | class ____:
"""
Feature functions that are built around the need to register feature
handlers
TODO: Once features have been audited and migrated to the entity
handler, remove this class entirely
"""
def __init__(self) -> None:
self._handler_registry: dict[str, list[FeatureHandler]] = defaultdict(list)
def add_handler(self, handler: FeatureHandler) -> None:
"""
Register a feature handler.
The passed object is a FeatureHandler that is associated with all
features defined in the ``handler.features`` property.
"""
for feature_name in handler.features:
self._handler_registry[feature_name].append(handler)
def _get_handler(self, feature: Feature, actor: User) -> bool | None:
for handler in self._handler_registry[feature.name]:
rv = handler(feature, actor)
if rv is not None:
return rv
return None
@abc.abstractmethod
def _get_feature_class(self, name: str) -> type[Feature]:
"""
We need this abstract method on this class because the `has_for_batch()`
method instantiates a `FeatureCheckBatch` and sets `manager` as `self`
as a `RegisteredFeatureManager`.
"""
raise NotImplementedError
def has_for_batch(
self,
name: str,
organization: Organization,
objects: Sequence[Project],
actor: User | RpcUser | AnonymousUser | None = None,
) -> dict[Project, bool | None]:
"""
Determine if a feature is enabled for a batch of objects.
This method enables checking a feature for an organization and a collection
of objects (e.g. projects). Feature handlers for batch checks are expected to
subclass `features.BatchFeatureHandler` and implement `has_for_batch` or
`_check_for_batch`. BatchFeatureHandlers will receive a `FeatureCheckBatch`
that contains the organization and object list.
Feature handlers that depend only on organization attributes, and not
on attributes of the individual objects being checked, will generally
perform faster if this method is used in instead of ``has``.
The return value is a dictionary with the objects as keys, and each
value is the result of the feature check on the organization.
This method *does not* work with the `entity_handler`.
>>> FeatureManager.has_for_batch('projects:feature', organization, [project1, project2], actor=request.user)
"""
result: dict[Project, bool | None] = {}
remaining = set(objects)
handlers = self._handler_registry[name]
try:
for handler in handlers:
if not remaining:
break
with sentry_sdk.start_span(
op="feature.has_for_batch.handler",
name=f"{type(handler).__name__} ({name})",
) as span:
batch_size = len(remaining)
span.set_data("Batch Size", batch_size)
span.set_data("Feature Name", name)
span.set_data("Handler Type", type(handler).__name__)
batch = FeatureCheckBatch(self, name, organization, remaining, actor)
handler_result = handler.has_for_batch(batch)
for obj, flag in handler_result.items():
if flag is not None:
remaining.remove(obj)
result[obj] = flag
span.set_data("Flags Found", batch_size - len(remaining))
default_flag = settings.SENTRY_FEATURES.get(name, False)
for obj in remaining:
result[obj] = default_flag
except Exception as e:
if in_random_rollout("features.error.capture_rate"):
sentry_sdk.capture_exception(e)
return result
FLAGPOLE_OPTION_PREFIX = "feature"
# TODO: Change RegisteredFeatureManager back to object once it can be removed
| RegisteredFeatureManager |
python | getsentry__sentry | src/sentry/releases/endpoints/release_deploys.py | {
"start": 1197,
"end": 1927
} | class ____(serializers.Serializer):
"""Serializer for Deploy response objects"""
id = serializers.CharField(help_text="The ID of the deploy")
environment = serializers.CharField(help_text="The environment name")
dateStarted = serializers.DateTimeField(
allow_null=True, help_text="An optional date that indicates when the deploy started"
)
dateFinished = serializers.DateTimeField(
help_text="An optional date that indicates when the deploy ended"
)
name = serializers.CharField(allow_null=True, help_text="The optional name of the deploy")
url = serializers.URLField(
allow_null=True, help_text="The optional URL that points to the deploy"
)
| DeployResponseSerializer |
python | realpython__materials | python-protocol/animals_v1.py | {
"start": 280,
"end": 361
} | class ____(Animal):
def meow(self):
print(f"{self.name} is meowing.")
| Cat |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/methods/test_round.py | {
"start": 395,
"end": 12705
} | class ____:
def test_round_division_by_zero_raises(self):
ts = Timestamp("2016-01-01")
msg = "Division by zero in rounding"
with pytest.raises(ValueError, match=msg):
ts.round("0ns")
@pytest.mark.parametrize(
"timestamp, freq, expected",
[
("20130101 09:10:11", "D", "20130101"),
("20130101 19:10:11", "D", "20130102"),
("20130201 12:00:00", "D", "20130202"),
("20130104 12:00:00", "D", "20130105"),
("2000-01-05 05:09:15.13", "D", "2000-01-05 00:00:00"),
("2000-01-05 05:09:15.13", "h", "2000-01-05 05:00:00"),
("2000-01-05 05:09:15.13", "s", "2000-01-05 05:09:15"),
],
)
def test_round_frequencies(self, timestamp, freq, expected):
dt = Timestamp(timestamp)
result = dt.round(freq)
expected = Timestamp(expected)
assert result == expected
def test_round_tzaware(self):
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("D")
expected = Timestamp("20130101", tz="US/Eastern")
assert result == expected
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("s")
assert result == dt
def test_round_30min(self):
# round
dt = Timestamp("20130104 12:32:00")
result = dt.round("30Min")
expected = Timestamp("20130104 12:30:00")
assert result == expected
def test_round_subsecond(self):
# GH#14440 & GH#15578
result = Timestamp("2016-10-17 12:00:00.0015").round("ms")
expected = Timestamp("2016-10-17 12:00:00.002000")
assert result == expected
result = Timestamp("2016-10-17 12:00:00.00149").round("ms")
expected = Timestamp("2016-10-17 12:00:00.001000")
assert result == expected
ts = Timestamp("2016-10-17 12:00:00.0015")
for freq in ["us", "ns"]:
assert ts == ts.round(freq)
result = Timestamp("2016-10-17 12:00:00.001501031").round("10ns")
expected = Timestamp("2016-10-17 12:00:00.001501030")
assert result == expected
def test_round_nonstandard_freq(self):
with tm.assert_produces_warning(False):
Timestamp("2016-10-17 12:00:00.001501031").round("1010ns")
def test_round_invalid_arg(self):
stamp = Timestamp("2000-01-05 05:09:15.13")
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
stamp.round("foo")
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
("2117-01-01 00:00:45", "floor", "15s", "2117-01-01 00:00:45"),
("2117-01-01 00:00:45", "ceil", "15s", "2117-01-01 00:00:45"),
(
"2117-01-01 00:00:45.000000012",
"floor",
"10ns",
"2117-01-01 00:00:45.000000010",
),
(
"1823-01-01 00:00:01.000000012",
"ceil",
"10ns",
"1823-01-01 00:00:01.000000020",
),
("1823-01-01 00:00:01", "floor", "1s", "1823-01-01 00:00:01"),
("1823-01-01 00:00:01", "ceil", "1s", "1823-01-01 00:00:01"),
("NaT", "floor", "1s", "NaT"),
("NaT", "ceil", "1s", "NaT"),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = Timestamp(test_input)
func = getattr(dt, rounder)
result = func(freq)
if dt is NaT:
assert result is NaT
else:
expected = Timestamp(expected)
assert result == expected
@pytest.mark.parametrize(
"test_input, freq, expected",
[
("2018-01-01 00:02:06", "2s", "2018-01-01 00:02:06"),
("2018-01-01 00:02:00", "2min", "2018-01-01 00:02:00"),
("2018-01-01 00:04:00", "4min", "2018-01-01 00:04:00"),
("2018-01-01 00:15:00", "15min", "2018-01-01 00:15:00"),
("2018-01-01 00:20:00", "20min", "2018-01-01 00:20:00"),
("2018-01-01 03:00:00", "3h", "2018-01-01 03:00:00"),
],
)
@pytest.mark.parametrize("rounder", ["ceil", "floor", "round"])
def test_round_minute_freq(self, test_input, freq, expected, rounder):
# Ensure timestamps that shouldn't round dont!
# GH#21262
dt = Timestamp(test_input)
expected = Timestamp(expected)
func = getattr(dt, rounder)
result = func(freq)
assert result == expected
def test_ceil(self, unit):
dt = Timestamp("20130101 09:10:11").as_unit(unit)
result = dt.ceil("D")
expected = Timestamp("20130102")
assert result == expected
assert result._creso == dt._creso
def test_floor(self, unit):
dt = Timestamp("20130101 09:10:11").as_unit(unit)
result = dt.floor("D")
expected = Timestamp("20130101")
assert result == expected
assert result._creso == dt._creso
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
def test_round_dst_border_ambiguous(self, method, unit):
# GH 18946 round near "fall back" DST
ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid")
ts = ts.as_unit(unit)
result = getattr(ts, method)("h", ambiguous=True)
assert result == ts
assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)("h", ambiguous=False)
expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
"Europe/Madrid"
)
assert result == expected
assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)("h", ambiguous="NaT")
assert result is NaT
msg = "Cannot infer dst time"
with pytest.raises(ValueError, match=msg):
getattr(ts, method)("h", ambiguous="raise")
@pytest.mark.parametrize(
"method, ts_str, freq",
[
["ceil", "2018-03-11 01:59:00-0600", "5min"],
["round", "2018-03-11 01:59:00-0600", "5min"],
["floor", "2018-03-11 03:01:00-0500", "2h"],
],
)
def test_round_dst_border_nonexistent(self, method, ts_str, freq, unit):
# GH 23324 round near "spring forward" DST
ts = Timestamp(ts_str, tz="America/Chicago").as_unit(unit)
result = getattr(ts, method)(freq, nonexistent="shift_forward")
expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
assert result == expected
assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)(freq, nonexistent="NaT")
assert result is NaT
msg = "2018-03-11 02:00:00"
with pytest.raises(ValueError, match=msg):
getattr(ts, method)(freq, nonexistent="raise")
@pytest.mark.parametrize(
"timestamp",
[
"2018-01-01 0:0:0.124999360",
"2018-01-01 0:0:0.125000367",
"2018-01-01 0:0:0.125500",
"2018-01-01 0:0:0.126500",
"2018-01-01 12:00:00",
"2019-01-01 12:00:00",
],
)
@pytest.mark.parametrize(
"freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"1D",
],
)
def test_round_int64(self, timestamp, freq):
# check that all rounding modes are accurate to int64 precision
# see GH#22591
dt = Timestamp(timestamp).as_unit("ns")
unit = to_offset(freq).nanos
# test floor
result = dt.floor(freq)
assert result._value % unit == 0, f"floor not a {freq} multiple"
assert 0 <= dt._value - result._value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
assert result._value % unit == 0, f"ceil not a {freq} multiple"
assert 0 <= result._value - dt._value < unit, "ceil error"
# test round
result = dt.round(freq)
assert result._value % unit == 0, f"round not a {freq} multiple"
assert abs(result._value - dt._value) <= unit // 2, "round error"
if unit % 2 == 0 and abs(result._value - dt._value) == unit // 2:
# round half to even
assert result._value // unit % 2 == 0, "round half to even error"
def test_round_implementation_bounds(self):
# See also: analogous test for Timedelta
result = Timestamp.min.ceil("s")
expected = Timestamp(1677, 9, 21, 0, 12, 44)
assert result == expected
result = Timestamp.max.floor("s")
expected = Timestamp.max - Timedelta(854775807)
assert result == expected
msg = "Cannot round 1677-09-21 00:12:43.145224193 to freq=<Second>"
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.floor("s")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.round("s")
msg = "Cannot round 2262-04-11 23:47:16.854775807 to freq=<Second>"
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.ceil("s")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.round("s")
@pytest.mark.slow
@given(val=st.integers(iNaT + 1, lib.i8max))
@pytest.mark.parametrize(
"method", [Timestamp.round, Timestamp.floor, Timestamp.ceil]
)
def test_round_sanity(self, val, method):
cls = Timestamp
err_cls = OutOfBoundsDatetime
val = np.int64(val)
ts = cls(val)
def checker(ts, nanos, unit):
# First check that we do raise in cases where we should
if nanos == 1:
pass
else:
div, mod = divmod(ts._value, nanos)
diff = int(nanos - mod)
lb = ts._value - mod
assert lb <= ts._value # i.e. no overflows with python ints
ub = ts._value + diff
assert ub > ts._value # i.e. no overflows with python ints
msg = "without overflow"
if mod == 0:
# We should never be raising in this
pass
elif method is cls.ceil:
if ub > cls.max._value:
with pytest.raises(err_cls, match=msg):
method(ts, unit)
return
elif method is cls.floor:
if lb < cls.min._value:
with pytest.raises(err_cls, match=msg):
method(ts, unit)
return
elif mod >= diff:
if ub > cls.max._value:
with pytest.raises(err_cls, match=msg):
method(ts, unit)
return
elif lb < cls.min._value:
with pytest.raises(err_cls, match=msg):
method(ts, unit)
return
res = method(ts, unit)
td = res - ts
diff = abs(td._value)
assert diff < nanos
assert res._value % nanos == 0
if method is cls.round:
assert diff <= nanos / 2
elif method is cls.floor:
assert res <= ts
elif method is cls.ceil:
assert res >= ts
nanos = 1
checker(ts, nanos, "ns")
nanos = 1000
checker(ts, nanos, "us")
nanos = 1_000_000
checker(ts, nanos, "ms")
nanos = 1_000_000_000
checker(ts, nanos, "s")
nanos = 60 * 1_000_000_000
checker(ts, nanos, "min")
nanos = 60 * 60 * 1_000_000_000
checker(ts, nanos, "h")
nanos = 24 * 60 * 60 * 1_000_000_000
checker(ts, nanos, "D")
| TestTimestampRound |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 20030,
"end": 20839
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MegatronBert
| MegatronBertPredictionHeadTransform |
python | python-poetry__poetry | tests/plugins/test_plugin_manager.py | {
"start": 1749,
"end": 1842
} | class ____(ApplicationPlugin):
commands: ClassVar[list[type[Command]]] = []
| MyCommandPlugin |
python | getsentry__sentry | src/sentry/rules/filters/latest_release.py | {
"start": 1886,
"end": 4292
} | class ____(EventFilter):
id = "sentry.rules.filters.latest_release.LatestReleaseFilter"
label = "The event is from the latest release"
def get_latest_release(self, event: GroupEvent) -> Release | None:
environment_id = None if self.rule is None else self.rule.environment_id
cache_key = get_project_release_cache_key(event.group.project_id, environment_id)
latest_release = cache.get(cache_key)
if latest_release is None:
organization_id = event.group.project.organization_id
environments = None
if environment_id:
environments = [Environment.objects.get(id=environment_id)]
try:
latest_release_versions = get_latest_release(
[event.group.project],
environments,
organization_id,
)
except Release.DoesNotExist:
return None
latest_releases = list(
Release.objects.filter(
version=latest_release_versions[0], organization_id=organization_id
)
)
if latest_releases:
cache.set(cache_key, latest_releases[0], 600)
return latest_releases[0]
else:
cache.set(cache_key, False, 600)
return latest_release
def passes(self, event: GroupEvent, state: EventState) -> bool:
latest_release = self.get_latest_release(event)
if not latest_release:
return False
releases = (
v.lower()
for k, v in event.tags
if k.lower() == "release" or tagstore.backend.get_standardized_key(k) == "release"
)
for release in releases:
if release == latest_release.version.lower():
return True
return False
post_save.connect(clear_release_cache, sender=Release, weak=False)
pre_delete.connect(clear_release_cache, sender=Release, weak=False)
post_save.connect(clear_release_project_cache, sender=ReleaseProject, weak=False)
post_delete.connect(clear_release_project_cache, sender=ReleaseProject, weak=False)
post_save.connect(clear_release_environment_project_cache, sender=ReleaseEnvironment, weak=False)
post_delete.connect(clear_release_environment_project_cache, sender=ReleaseEnvironment, weak=False)
| LatestReleaseFilter |
python | openai__gym | gym/error.py | {
"start": 1727,
"end": 1887
} | class ____(Error):
"""When the monitor is active, raised when the user tries to step an environment that's not yet terminated or truncated."""
| ResetNotAllowed |
python | fabric__fabric | fabric/transfer.py | {
"start": 13018,
"end": 14760
} | class ____:
"""
A container for information about the result of a file transfer.
See individual attribute/method documentation below for details.
.. note::
Unlike similar classes such as `invoke.runners.Result` or
`fabric.runners.Result` (which have a concept of "warn and return
anyways on failure") this class has no useful truthiness behavior. If a
file transfer fails, some exception will be raised, either an `OSError`
or an error from within Paramiko.
.. versionadded:: 2.0
"""
# TODO: how does this differ from put vs get? field stating which? (feels
# meh) distinct classes differing, for now, solely by name? (also meh)
def __init__(self, local, orig_local, remote, orig_remote, connection):
#: The local path the file was saved as, or the object it was saved
#: into if a file-like object was given instead.
#:
#: If a string path, this value is massaged to be absolute; see
#: `.orig_local` for the original argument value.
self.local = local
#: The original value given as the returning method's ``local``
#: argument.
self.orig_local = orig_local
#: The remote path downloaded from. Massaged to be absolute; see
#: `.orig_remote` for the original argument value.
self.remote = remote
#: The original argument value given as the returning method's
#: ``remote`` argument.
self.orig_remote = orig_remote
#: The `.Connection` object this result was obtained from.
self.connection = connection
# TODO: ensure str/repr makes it easily differentiable from run() or
# local() result objects (and vice versa).
| Result |
python | numba__numba | numba/misc/help/inspector.py | {
"start": 5855,
"end": 7798
} | class ____(Formatter):
"""Formatter that outputs HTML
"""
def escape(self, text):
import html
return html.escape(text)
def title(self, text):
self.print('<h1>', text, '</h2>')
def begin_module_section(self, modname):
self.print('<h2>', modname, '</h2>')
self.print('<ul>')
def end_module_section(self):
self.print('</ul>')
def write_supported_item(self, modname, itemname, typename, explained,
sources, alias):
self.print('<li>')
self.print('{}.<b>{}</b>'.format(
modname,
itemname,
))
self.print(': <b>{}</b>'.format(typename))
self.print('<div><pre>', explained, '</pre></div>')
self.print("<ul>")
for tcls, source in sources.items():
if source:
self.print("<li>")
impl = source['name']
sig = source['sig']
filename = source['filename']
lines = source['lines']
self.print(
"<p>defined by <b>{}</b>{} at {}:{}-{}</p>".format(
self.escape(impl), self.escape(sig),
self.escape(filename), lines[0], lines[1],
),
)
self.print('<p>{}</p>'.format(
self.escape(source['docstring'] or '')
))
else:
self.print("<li>{}".format(self.escape(str(tcls))))
self.print("</li>")
self.print("</ul>")
self.print('</li>')
def write_unsupported_item(self, modname, itemname):
self.print('<li>')
self.print('{}.<b>{}</b>: UNSUPPORTED'.format(
modname,
itemname,
))
self.print('</li>')
def write_statistic(self, stats):
self.print('<p>{}</p>'.format(stats.describe()))
| HTMLFormatter |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/common.py | {
"start": 3537,
"end": 4006
} | class ____(StrictBaseModel, Generic[T]):
"""Serializer for bulk entity operations."""
actions: list[
Annotated[
Union[
Annotated[BulkCreateAction[T], Tag(BulkAction.CREATE.value)],
Annotated[BulkUpdateAction[T], Tag(BulkAction.UPDATE.value)],
Annotated[BulkDeleteAction[T], Tag(BulkAction.DELETE.value)],
],
Discriminator(_action_discriminator),
]
]
| BulkBody |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-guru/llama_index/readers/guru/base.py | {
"start": 365,
"end": 5459
} | class ____(BaseReader):
"""Guru cards / collections reader."""
def __init__(self, guru_username: str, api_token: str) -> None:
"""
Initialize GuruReader.
Args:
guru_username: Guru username.
api_token: Guru API token. This can be personal API keys or collection based API keys. Note this is not the same as your password.
"""
self.guru_username = guru_username
self.api_token = api_token
self.guru_auth = HTTPBasicAuth(guru_username, api_token)
def load_data(
self,
collection_ids: Optional[List[str]] = None,
card_ids: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from Guru.
Args:
collection_ids: List of collection ids to load from. Only pass in card_ids or collection_ids, not both.
card_ids: List of card ids to load from. Only pass in card_ids or collection_ids, not both.
Returns:
List[Document]: List of documents.
"""
assert (collection_ids is None) or (card_ids is None), (
"Only pass in card_ids or collection_ids, not both."
)
assert (collection_ids is not None) or (card_ids is not None), (
"Pass in card_ids or collection_ids."
)
if collection_ids is not None:
card_ids = self._get_card_ids_from_collection_ids(collection_ids)
return [self._get_card_info(card_id) for card_id in card_ids]
def _get_card_ids_from_collection_ids(self, collection_ids: List[str]) -> List[str]:
"""Get card ids from collection ids."""
all_ids = []
for collection_id in collection_ids:
card_ids = self._get_card_ids_from_collection_id(collection_id)
all_ids.extend(card_ids)
return all_ids
def _get_card_ids_from_collection_id(self, collection_id: str) -> List[str]:
records = []
next_page = True
initial_url = "https://api.getguru.com/api/v1/search/cardmgr?queryType=cards"
response = requests.get(initial_url, auth=self.guru_auth)
records.extend(response.json())
while next_page:
try:
url = response.headers["Link"]
url_pattern = r"<(.*?)>"
url_match = re.search(url_pattern, url)
url = url_match.group(1)
except Exception:
next_page = False
break
response = requests.get(url, auth=self.guru_auth)
records.extend(response.json())
cards = pd.DataFrame.from_records(records)
df_normalized = pd.json_normalize(cards["collection"])
df_normalized.columns = ["collection_" + col for col in df_normalized.columns]
df = pd.concat([cards, df_normalized], axis=1)
df = df[df.collection_id == collection_id]
return list(df["id"])
def _get_card_info(self, card_id: str) -> Any:
"""
Get card info.
Args:
card_id: Card id.
Returns:
Document: Document.
"""
url = f"https://api.getguru.com/api/v1/cards/{card_id}/extended"
headers = {"accept": "application/json"}
response = requests.get(url, auth=self.guru_auth, headers=headers)
if response.status_code == 200:
title = response.json()["preferredPhrase"]
html = response.json()["content"] # i think this needs to be loaded
content = self._clean_html(html)
collection = response.json()["collection"]["name"]
metadata = {
"title": title,
"collection": collection,
"card_id": card_id,
"guru_link": self._get_guru_link(card_id),
}
return Document(text=content, extra_info=metadata)
else:
logger.warning(f"Could not get card info for {card_id}.")
return None
@staticmethod
def _clean_html(text: str) -> str:
"""
Cleans HTML content by fetching its text representation using BeautifulSoup.
"""
if text is None:
return ""
if isinstance(text, str):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
soup = BeautifulSoup(text, "html.parser")
return soup.get_text()
return str(text)
def _get_guru_link(self, card_id) -> str:
"""
Takes a guru "ExternalId" from meta data and returns the link to the guru card.
"""
url = f"https://api.getguru.com/api/v1/cards/{card_id}/extended"
headers = {
"accept": "application/json",
}
response = requests.get(url, headers=headers, auth=self.guru_auth)
if response.status_code == 200:
slug = response.json()["slug"]
else:
raise RuntimeError(f"Guru link doesn't exist: {response.status_code}")
return f"https://app.getguru.com/card/{slug}"
| GuruReader |
python | joke2k__faker | faker/providers/lorem/he_IL/__init__.py | {
"start": 68,
"end": 2655
} | class ____(LoremProvider):
"""Implement lorem provider for ``he_IL`` locale."""
word_list = (
"אאוגו",
"אגת",
"אדיפיסינג",
"אדנדום",
"אט",
"איאקוליס",
"איבן",
"איף",
"איפסום",
"אלית",
"אלמנקום",
"אמט",
"אס",
"אפאח",
"אקווזמן",
"ארווס",
"בגורמי",
"בורק?",
"בלובק",
"בלינדו",
"בלינך",
"בליקרה",
"בעליק",
"בעריר",
"בראיט",
"ברומץ",
"בריקנה",
"ברשג",
"גדדיש",
"גולר",
"גק",
"דול",
"דולור",
"דומור",
"דז",
"דיאם",
"דלאמת",
"דס",
"הבקיץ",
"הדש",
"הועניב",
"היושבב",
"הכייר",
"השמה",
"התידם",
"וואל",
"וולופטה",
"וחאית",
"ולחת",
"ולתיעם",
"ומעיוט",
"ומרגשח",
"וסטיבולום",
"וסתעד",
"וק",
"ותלברו",
"זותה",
"חשלו",
"טידום",
"יבש",
"יהול",
"ישבעס",
"כאנה",
"כלרשט",
"להאמית",
"לורם",
"ליאמום",
"ליבם",
"ליץ",
"לכימפו",
"לכנו",
"לכנוץ",
"למטכין",
"למרקוח",
"למרקל",
"לפמעט",
"לפריקך",
"לפתיעם",
"לקטוס",
"לרטי",
"לתיג",
"לתכי",
"מא",
"מגמש",
"מונחף",
"מונפרד",
"מונפרר",
"מוסן",
"מורגם",
"מיחוצים",
"מנורך",
"מנכם",
"מנק",
"מנת",
"מרגשי",
"נובש",
"נולום",
"נון",
"נונסטי",
"ניבאה",
"ניסי",
"ניצאחו",
"נמרגי",
"נשואי",
"סאפיאן",
"סוברט",
"סולגק",
"סוליסי",
"סחטיר",
"סטום",
"סיט",
"סילקוף",
"סכעיט",
"סת",
"סתשם",
"עמחליף",
"ערששף",
"פוסיליס",
"צוט",
"צופעט",
"צורק",
"קוויז",
"קוויס",
"קולהע",
"קולורס",
"קונדימנטום",
"קונסקטורר",
"קורוס",
"קלאצי",
"קלובר",
"קראס",
"קרהשק",
"רוגצה",
"שבצק",
"שהכים",
"שלושע",
"שמחויט",
"שנרא",
"שעותלשך",
"שערש",
"תוק",
"תצטנפל",
"תצטריק",
"תרבנך",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/instigation.py | {
"start": 1366,
"end": 1739
} | class ____(EnumSerializer):
def unpack(self, value: str):
if value == InstigatorStatus.AUTOMATICALLY_RUNNING.name:
value = InstigatorStatus.DECLARED_IN_CODE.name
return super().unpack(value)
@whitelist_for_serdes(
serializer=InstigatorStatusBackcompatSerializer,
old_storage_names={"JobStatus"},
)
| InstigatorStatusBackcompatSerializer |
python | boto__boto3 | boto3/resources/action.py | {
"start": 865,
"end": 3546
} | class ____:
"""
A class representing a callable action on a resource, for example
``sqs.get_queue_by_name(...)`` or ``s3.Bucket('foo').delete()``.
The action may construct parameters from existing resource identifiers
and may return either a raw response or a new resource instance.
:type action_model: :py:class`~boto3.resources.model.Action`
:param action_model: The action model.
:type factory: ResourceFactory
:param factory: The factory that created the resource class to which
this action is attached.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
"""
def __init__(self, action_model, factory=None, service_context=None):
self._action_model = action_model
# In the simplest case we just return the response, but if a
# resource is defined, then we must create these before returning.
resource_response_model = action_model.resource
if resource_response_model:
self._response_handler = ResourceHandler(
search_path=resource_response_model.path,
factory=factory,
resource_model=resource_response_model,
service_context=service_context,
operation_name=action_model.request.operation,
)
else:
self._response_handler = RawHandler(action_model.path)
def __call__(self, parent, *args, **kwargs):
"""
Perform the action's request operation after building operation
parameters and build any defined resources from the response.
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
:param parent: The resource instance to which this action is attached.
:rtype: dict or ServiceResource or list(ServiceResource)
:return: The response, either as a raw dict or resource instance(s).
"""
operation_name = xform_name(self._action_model.request.operation)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._action_model.request)
params.update(kwargs)
logger.debug(
'Calling %s:%s with %r',
parent.meta.service_name,
operation_name,
params,
)
response = getattr(parent.meta.client, operation_name)(*args, **params)
logger.debug('Response: %r', response)
return self._response_handler(parent, params, response)
| ServiceAction |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 908854,
"end": 911245
} | class ____(
sgqlc.types.Type,
Node,
Comment,
Deletable,
Updatable,
UpdatableComment,
Reactable,
RepositoryNode,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"author_can_push_to_repository",
"comments",
"commit",
"on_behalf_of",
"pull_request",
"resource_path",
"state",
"submitted_at",
"url",
)
author_can_push_to_repository = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="authorCanPushToRepository"
)
comments = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestReviewCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
commit = sgqlc.types.Field(Commit, graphql_name="commit")
on_behalf_of = sgqlc.types.Field(
sgqlc.types.non_null(TeamConnection),
graphql_name="onBehalfOf",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pull_request = sgqlc.types.Field(
sgqlc.types.non_null(PullRequest), graphql_name="pullRequest"
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
state = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestReviewState), graphql_name="state"
)
submitted_at = sgqlc.types.Field(DateTime, graphql_name="submittedAt")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| PullRequestReview |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 46631,
"end": 94414
} | class ____(Container):
"""
Container that holds a control.
:param content: :class:`.UIControl` instance.
:param width: :class:`.Dimension` instance or callable.
:param height: :class:`.Dimension` instance or callable.
:param z_index: When specified, this can be used to bring element in front
of floating elements.
:param dont_extend_width: When `True`, don't take up more width then the
preferred width reported by the control.
:param dont_extend_height: When `True`, don't take up more width then the
preferred height reported by the control.
:param ignore_content_width: A `bool` or :class:`.Filter` instance. Ignore
the :class:`.UIContent` width when calculating the dimensions.
:param ignore_content_height: A `bool` or :class:`.Filter` instance. Ignore
the :class:`.UIContent` height when calculating the dimensions.
:param left_margins: A list of :class:`.Margin` instance to be displayed on
the left. For instance: :class:`~prompt_toolkit.layout.NumberedMargin`
can be one of them in order to show line numbers.
:param right_margins: Like `left_margins`, but on the other side.
:param scroll_offsets: :class:`.ScrollOffsets` instance, representing the
preferred amount of lines/columns to be always visible before/after the
cursor. When both top and bottom are a very high number, the cursor
will be centered vertically most of the time.
:param allow_scroll_beyond_bottom: A `bool` or
:class:`.Filter` instance. When True, allow scrolling so far, that the
top part of the content is not visible anymore, while there is still
empty space available at the bottom of the window. In the Vi editor for
instance, this is possible. You will see tildes while the top part of
the body is hidden.
:param wrap_lines: A `bool` or :class:`.Filter` instance. When True, don't
scroll horizontally, but wrap lines instead.
:param get_vertical_scroll: Callable that takes this window
instance as input and returns a preferred vertical scroll.
(When this is `None`, the scroll is only determined by the last and
current cursor position.)
:param get_horizontal_scroll: Callable that takes this window
instance as input and returns a preferred vertical scroll.
:param always_hide_cursor: A `bool` or
:class:`.Filter` instance. When True, never display the cursor, even
when the user control specifies a cursor position.
:param cursorline: A `bool` or :class:`.Filter` instance. When True,
display a cursorline.
:param cursorcolumn: A `bool` or :class:`.Filter` instance. When True,
display a cursorcolumn.
:param colorcolumns: A list of :class:`.ColorColumn` instances that
describe the columns to be highlighted, or a callable that returns such
a list.
:param align: :class:`.WindowAlign` value or callable that returns an
:class:`.WindowAlign` value. alignment of content.
:param style: A style string. Style to be applied to all the cells in this
window. (This can be a callable that returns a string.)
:param char: (string) Character to be used for filling the background. This
can also be a callable that returns a character.
:param get_line_prefix: None or a callable that returns formatted text to
be inserted before a line. It takes a line number (int) and a
wrap_count and returns formatted text. This can be used for
implementation of line continuations, things like Vim "breakindent" and
so on.
"""
def __init__(
    self,
    content: UIControl | None = None,
    width: AnyDimension = None,
    height: AnyDimension = None,
    z_index: int | None = None,
    dont_extend_width: FilterOrBool = False,
    dont_extend_height: FilterOrBool = False,
    ignore_content_width: FilterOrBool = False,
    ignore_content_height: FilterOrBool = False,
    left_margins: Sequence[Margin] | None = None,
    right_margins: Sequence[Margin] | None = None,
    scroll_offsets: ScrollOffsets | None = None,
    allow_scroll_beyond_bottom: FilterOrBool = False,
    wrap_lines: FilterOrBool = False,
    get_vertical_scroll: Callable[[Window], int] | None = None,
    get_horizontal_scroll: Callable[[Window], int] | None = None,
    always_hide_cursor: FilterOrBool = False,
    cursorline: FilterOrBool = False,
    cursorcolumn: FilterOrBool = False,
    colorcolumns: (
        None | list[ColorColumn] | Callable[[], list[ColorColumn]]
    ) = None,
    align: WindowAlign | Callable[[], WindowAlign] = WindowAlign.LEFT,
    style: str | Callable[[], str] = "",
    char: None | str | Callable[[], str] = None,
    get_line_prefix: GetLinePrefixCallable | None = None,
) -> None:
    """
    Store and normalize the window configuration.

    All parameters are documented in the class docstring. Rendering itself
    happens later, in `write_to_screen`.
    """
    # Normalize every bool-or-Filter flag to a Filter once, so that
    # render-time checks are uniform `self.flag()` calls.
    self.allow_scroll_beyond_bottom = to_filter(allow_scroll_beyond_bottom)
    self.always_hide_cursor = to_filter(always_hide_cursor)
    self.wrap_lines = to_filter(wrap_lines)
    self.cursorline = to_filter(cursorline)
    self.cursorcolumn = to_filter(cursorcolumn)

    # Fall back to a DummyControl when no content was given.
    self.content = content or DummyControl()
    self.dont_extend_width = to_filter(dont_extend_width)
    self.dont_extend_height = to_filter(dont_extend_height)
    self.ignore_content_width = to_filter(ignore_content_width)
    self.ignore_content_height = to_filter(ignore_content_height)
    self.left_margins = left_margins or []
    self.right_margins = right_margins or []
    self.scroll_offsets = scroll_offsets or ScrollOffsets()
    self.get_vertical_scroll = get_vertical_scroll
    self.get_horizontal_scroll = get_horizontal_scroll
    self.colorcolumns = colorcolumns or []
    self.align = align
    self.style = style
    self.char = char
    self.get_line_prefix = get_line_prefix

    self.width = width
    self.height = height
    self.z_index = z_index

    # Cache for `UIContent` instances created during one render pass
    # (also used indirectly by margins through `_get_margin_width`).
    self._ui_content_cache: SimpleCache[tuple[int, int, int], UIContent] = (
        SimpleCache(maxsize=8)
    )
    # Cache for margin widths; keyed on (margin, render_counter).
    self._margin_width_cache: SimpleCache[tuple[Margin, int], int] = SimpleCache(
        maxsize=1
    )

    # Initialize scroll positions / render info.
    self.reset()
def __repr__(self) -> str:
    """Debug representation; shows the wrapped UI control."""
    return "Window(content={!r})".format(self.content)
def reset(self) -> None:
    """
    Reset the window and its content to the initial, unscrolled state.
    """
    # Let the wrapped control reset its own state first.
    self.content.reset()

    # Scrolling positions of the main content.
    self.vertical_scroll = 0
    self.horizontal_scroll = 0

    # Extra vertical offset, used when the single line containing the
    # cursor is taller than the whole window.
    self.vertical_scroll_2 = 0

    # Mappings between buffer input and render output; filled in during
    # rendering.
    self.render_info: WindowRenderInfo | None = None
def _get_margin_width(self, margin: Margin) -> int:
    """
    Return the width for this margin.
    (Calculate only once per render time.)

    :param margin: The `Margin` instance to measure.
    """

    # Margin.get_width, needs to have a UIContent instance.
    def get_ui_content() -> UIContent:
        return self._get_ui_content(width=0, height=0)

    def get_width() -> int:
        return margin.get_width(get_ui_content)

    # Key on the render counter, so the cached value is only reused
    # within a single render pass.
    key = (margin, get_app().render_counter)
    return self._margin_width_cache.get(key, get_width)
def _get_total_margin_width(self) -> int:
    """Return the combined width of all left and right margins."""
    total = 0
    for margin in [*self.left_margins, *self.right_margins]:
        total += self._get_margin_width(margin)
    return total
def preferred_width(self, max_available_width: int) -> Dimension:
    """
    Calculate the preferred width for this window.

    :param max_available_width: Upper bound the parent container can offer
        to this window (margins included).
    """

    def preferred_content_width() -> int | None:
        """Content width: is only calculated if no exact width for the
        window was given."""
        if self.ignore_content_width():
            return None

        # Calculate the width of the margin.
        total_margin_width = self._get_total_margin_width()

        # Window of the content. (Can be `None`.)
        preferred_width = self.content.preferred_width(
            max_available_width - total_margin_width
        )

        if preferred_width is not None:
            # Include width of the margins.
            preferred_width += total_margin_width
        return preferred_width

    # Merge the explicit `self.width`, the content preference and the
    # `dont_extend_width` flag into a single Dimension.
    return self._merge_dimensions(
        dimension=to_dimension(self.width),
        get_preferred=preferred_content_width,
        dont_extend=self.dont_extend_width(),
    )
def preferred_height(self, width: int, max_available_height: int) -> Dimension:
    """
    Calculate the preferred height for this window.

    :param width: Width allocated to this window (margins included); with
        line wrapping the required height depends on it.
    :param max_available_height: Upper bound the parent container offers.
    """

    def preferred_content_height() -> int | None:
        """Content height: is only calculated if no exact height for the
        window was given."""
        if self.ignore_content_height():
            return None

        total_margin_width = self._get_total_margin_width()
        wrap_lines = self.wrap_lines()

        return self.content.preferred_height(
            width - total_margin_width,
            max_available_height,
            wrap_lines,
            self.get_line_prefix,
        )

    # Merge the explicit `self.height`, the content preference and the
    # `dont_extend_height` flag into a single Dimension.
    return self._merge_dimensions(
        dimension=to_dimension(self.height),
        get_preferred=preferred_content_height,
        dont_extend=self.dont_extend_height(),
    )
@staticmethod
def _merge_dimensions(
    dimension: Dimension | None,
    get_preferred: Callable[[], int | None],
    dont_extend: bool = False,
) -> Dimension:
    """
    Take the Dimension from this `Window` class and the received preferred
    size from the `UIControl` and return a `Dimension` to report to the
    parent container.

    :param dimension: Explicit dimension configured on the window, or None.
    :param get_preferred: Lazily computes the content's preferred size;
        only called when the window has no explicit preferred size.
    :param dont_extend: When True, also cap the max size at the preferred
        size, so the window is never stretched beyond it.
    """
    dimension = dimension or Dimension()

    # When a preferred dimension was explicitly given to the Window,
    # ignore the UIControl.
    preferred: int | None
    if dimension.preferred_specified:
        preferred = dimension.preferred
    else:
        # Otherwise, calculate the preferred dimension from the UI control
        # content.
        preferred = get_preferred()

    # When a 'preferred' dimension is given by the UIControl, make sure
    # that it stays within the bounds of the Window.
    if preferred is not None:
        if dimension.max_specified:
            preferred = min(preferred, dimension.max)

        if dimension.min_specified:
            preferred = max(preferred, dimension.min)

    # When a `dont_extend` flag has been given, use the preferred dimension
    # also as the max dimension.
    max_: int | None
    min_: int | None

    if dont_extend and preferred is not None:
        max_ = min(dimension.max, preferred)
    else:
        max_ = dimension.max if dimension.max_specified else None

    min_ = dimension.min if dimension.min_specified else None

    return Dimension(
        min=min_, max=max_, preferred=preferred, weight=dimension.weight
    )
def _get_ui_content(self, width: int, height: int) -> UIContent:
    """
    Create a `UIContent` instance.

    The result is cached per (render pass, width, height), because content
    creation can be expensive and several callers need it within a single
    render.
    """

    def get_content() -> UIContent:
        return self.content.create_content(width=width, height=height)

    key = (get_app().render_counter, width, height)
    return self._ui_content_cache.get(key, get_content)
def _get_digraph_char(self) -> str | None:
    """
    Return the symbol to display underneath the cursor while a digraph
    (or quoted insert) is being entered, or `None` when not applicable.
    """
    app = get_app()

    # Quoted-insert mode shows a caret.
    if app.quoted_insert:
        return "^"

    if not app.vi_state.waiting_for_digraph:
        return None

    # Show the first digraph symbol if already typed, else a placeholder.
    return app.vi_state.digraph_symbol1 or "?"
def write_to_screen(
    self,
    screen: Screen,
    mouse_handlers: MouseHandlers,
    write_position: WritePosition,
    parent_style: str,
    erase_bg: bool,
    z_index: int | None,
) -> None:
    """
    Write window to screen. This renders the user control, the margins and
    copies everything over to the absolute position at the given screen.

    :param screen: The `Screen` to draw into.
    :param mouse_handlers: Registry for the mouse handlers of this region.
    :param write_position: Absolute region available to this window.
    :param parent_style: Style string inherited from parent containers.
    :param erase_bg: Whether to erase the background first.
    :param z_index: Z-index inherited from the parent; `self.z_index`
        takes precedence when set.
    """
    # If dont_extend_width/height was given. Then reduce width/height in
    # WritePosition if the parent wanted us to paint in a bigger area.
    # (This happens if this window is bundled with another window in a
    # HSplit/VSplit, but with different size requirements.)
    # Note: a copy is made so the caller's WritePosition is not mutated.
    write_position = WritePosition(
        xpos=write_position.xpos,
        ypos=write_position.ypos,
        width=write_position.width,
        height=write_position.height,
    )

    if self.dont_extend_width():
        write_position.width = min(
            write_position.width,
            self.preferred_width(write_position.width).preferred,
        )

    if self.dont_extend_height():
        write_position.height = min(
            write_position.height,
            self.preferred_height(
                write_position.width, write_position.height
            ).preferred,
        )

    # Draw. The window's own z_index overrides the inherited one.
    z_index = z_index if self.z_index is None else self.z_index

    draw_func = partial(
        self._write_to_screen_at_index,
        screen,
        mouse_handlers,
        write_position,
        parent_style,
        erase_bg,
    )

    if z_index is None or z_index <= 0:
        # When no z_index is given, draw right away.
        draw_func()
    else:
        # Otherwise, postpone. (The screen draws z-indexed layers later,
        # in order.)
        screen.draw_with_z_index(z_index=z_index, draw_func=draw_func)
def _write_to_screen_at_index(
    self,
    screen: Screen,
    mouse_handlers: MouseHandlers,
    write_position: WritePosition,
    parent_style: str,
    erase_bg: bool,
) -> None:
    """
    Render this window (control body and margins) into `screen` and
    register the mouse handlers for its area.

    :param screen: The `Screen` to draw into.
    :param mouse_handlers: Registry that maps screen regions to handlers.
    :param write_position: Absolute region of the screen we may paint.
    :param parent_style: Style string inherited from parent containers.
    :param erase_bg: When True, erase the background first (used by floats).
    """
    # Don't bother writing invisible windows.
    # (We save some time, but also avoid applying last-line styling.)
    if write_position.height <= 0 or write_position.width <= 0:
        return

    # Calculate margin sizes.
    left_margin_widths = [self._get_margin_width(m) for m in self.left_margins]
    right_margin_widths = [self._get_margin_width(m) for m in self.right_margins]
    total_margin_width = sum(left_margin_widths + right_margin_widths)

    # Render UserControl.
    ui_content = self.content.create_content(
        write_position.width - total_margin_width, write_position.height
    )
    assert isinstance(ui_content, UIContent)

    # Scroll content.
    wrap_lines = self.wrap_lines()
    self._scroll(
        ui_content, write_position.width - total_margin_width, write_position.height
    )

    # Erase background and fill with `char`.
    self._fill_bg(screen, write_position, erase_bg)

    # Resolve `align` attribute.
    align = self.align() if callable(self.align) else self.align

    # Write body
    visible_line_to_row_col, rowcol_to_yx = self._copy_body(
        ui_content,
        screen,
        write_position,
        sum(left_margin_widths),
        write_position.width - total_margin_width,
        self.vertical_scroll,
        self.horizontal_scroll,
        wrap_lines=wrap_lines,
        highlight_lines=True,
        vertical_scroll_2=self.vertical_scroll_2,
        always_hide_cursor=self.always_hide_cursor(),
        has_focus=get_app().layout.current_control == self.content,
        align=align,
        get_line_prefix=self.get_line_prefix,
    )

    # Remember render info. (Set before generating the margins. They need this.)
    x_offset = write_position.xpos + sum(left_margin_widths)
    y_offset = write_position.ypos

    render_info = WindowRenderInfo(
        window=self,
        ui_content=ui_content,
        horizontal_scroll=self.horizontal_scroll,
        vertical_scroll=self.vertical_scroll,
        window_width=write_position.width - total_margin_width,
        window_height=write_position.height,
        configured_scroll_offsets=self.scroll_offsets,
        visible_line_to_row_col=visible_line_to_row_col,
        rowcol_to_yx=rowcol_to_yx,
        x_offset=x_offset,
        y_offset=y_offset,
        wrap_lines=wrap_lines,
    )
    self.render_info = render_info

    # Set mouse handlers.
    def mouse_handler(mouse_event: MouseEvent) -> NotImplementedOrNone:
        """
        Wrapper around the mouse_handler of the `UIControl` that turns
        screen coordinates into line coordinates.
        Returns `NotImplemented` if no UI invalidation should be done.
        """
        # Don't handle mouse events outside of the current modal part of
        # the UI.
        if self not in get_app().layout.walk_through_modal_area():
            return NotImplemented

        # Find row/col position first.
        yx_to_rowcol = {v: k for k, v in rowcol_to_yx.items()}
        y = mouse_event.position.y
        x = mouse_event.position.x

        # If clicked below the content area, look for a position in the
        # last line instead.
        max_y = write_position.ypos + len(visible_line_to_row_col) - 1
        y = min(max_y, y)
        result: NotImplementedOrNone

        while x >= 0:
            try:
                row, col = yx_to_rowcol[y, x]
            except KeyError:
                # Try again. (When clicking on the right side of double
                # width characters, or on the right side of the input.)
                x -= 1
            else:
                # Found position, call handler of UIControl.
                result = self.content.mouse_handler(
                    MouseEvent(
                        position=Point(x=col, y=row),
                        event_type=mouse_event.event_type,
                        button=mouse_event.button,
                        modifiers=mouse_event.modifiers,
                    )
                )
                break
        else:
            # nobreak.
            # (No x/y coordinate found for the content. This happens in
            # case of a DummyControl, that does not have any content.
            # Report (0,0) instead.)
            result = self.content.mouse_handler(
                MouseEvent(
                    position=Point(x=0, y=0),
                    event_type=mouse_event.event_type,
                    button=mouse_event.button,
                    modifiers=mouse_event.modifiers,
                )
            )

        # If it returns NotImplemented, handle it here.
        if result == NotImplemented:
            result = self._mouse_handler(mouse_event)

        return result

    mouse_handlers.set_mouse_handler_for_range(
        x_min=write_position.xpos + sum(left_margin_widths),
        x_max=write_position.xpos + write_position.width - total_margin_width,
        y_min=write_position.ypos,
        y_max=write_position.ypos + write_position.height,
        handler=mouse_handler,
    )

    # Render and copy margins.
    move_x = 0

    def render_margin(m: Margin, width: int) -> UIContent:
        "Render margin. Return `Screen`."
        # Retrieve margin fragments.
        fragments = m.create_margin(render_info, width, write_position.height)

        # Turn the fragments into a `UIContent` object, so it can be copied
        # with `_copy_margin` like regular content.
        # (NOTE(review): `width + 1` presumably avoids wrapping at exactly
        # `width` columns — confirm before changing.)
        return FormattedTextControl(fragments).create_content(
            width + 1, write_position.height
        )

    for m, width in zip(self.left_margins, left_margin_widths):
        if width > 0:  # (ConditionalMargin returns a zero width. -- Don't render.)
            # Create screen for margin.
            margin_content = render_margin(m, width)

            # Copy and shift X.
            self._copy_margin(margin_content, screen, write_position, move_x, width)
            move_x += width

    move_x = write_position.width - sum(right_margin_widths)

    for m, width in zip(self.right_margins, right_margin_widths):
        # Fix: skip zero-width margins here as well (ConditionalMargin
        # returns a zero width), consistent with the left-margin loop above.
        # Previously a hidden right margin was still rendered and copied.
        if width > 0:
            # Create screen for margin.
            margin_content = render_margin(m, width)

            # Copy and shift X.
            self._copy_margin(margin_content, screen, write_position, move_x, width)
        move_x += width

    # Apply 'self.style'
    self._apply_style(screen, write_position, parent_style)

    # Tell the screen that this user control has been painted at this
    # position.
    screen.visible_windows_to_write_positions[self] = write_position
def _copy_body(
    self,
    ui_content: UIContent,
    new_screen: Screen,
    write_position: WritePosition,
    move_x: int,
    width: int,
    vertical_scroll: int = 0,
    horizontal_scroll: int = 0,
    wrap_lines: bool = False,
    highlight_lines: bool = False,
    vertical_scroll_2: int = 0,
    always_hide_cursor: bool = False,
    has_focus: bool = False,
    align: WindowAlign = WindowAlign.LEFT,
    get_line_prefix: Callable[[int, int], AnyFormattedText] | None = None,
) -> tuple[dict[int, tuple[int, int]], dict[tuple[int, int], tuple[int, int]]]:
    """
    Copy the UIContent into the output screen.

    Return (visible_line_to_row_col, rowcol_to_yx) tuple.

    :param move_x: Horizontal offset within `write_position` (used to skip
        the left margins).
    :param width: Width available to the body (margins excluded).
    :param get_line_prefix: None or a callable that takes a line number
        (int) and a wrap_count (int) and returns formatted text.
    """
    xpos = write_position.xpos + move_x
    ypos = write_position.ypos
    line_count = ui_content.line_count
    new_buffer = new_screen.data_buffer
    empty_char = _CHAR_CACHE["", ""]

    # Map visible line number to (row, col) of input.
    # 'col' will always be zero if line wrapping is off.
    visible_line_to_row_col: dict[int, tuple[int, int]] = {}

    # Maps (row, col) from the input to (y, x) screen coordinates.
    rowcol_to_yx: dict[tuple[int, int], tuple[int, int]] = {}

    def copy_line(
        line: StyleAndTextTuples,
        lineno: int,
        x: int,
        y: int,
        is_input: bool = False,
    ) -> tuple[int, int]:
        """
        Copy over a single line to the output screen. This can wrap over
        multiple lines in the output. It will call the prefix (prompt)
        function before every line.
        """
        if is_input:
            current_rowcol_to_yx = rowcol_to_yx
        else:
            current_rowcol_to_yx = {}  # Throwaway dictionary.

        # Draw line prefix.
        if is_input and get_line_prefix:
            prompt = to_formatted_text(get_line_prefix(lineno, 0))
            x, y = copy_line(prompt, lineno, x, y, is_input=False)

        # Scroll horizontally.
        skipped = 0  # Characters skipped because of horizontal scrolling.
        if horizontal_scroll and is_input:
            h_scroll = horizontal_scroll
            line = explode_text_fragments(line)
            while h_scroll > 0 and line:
                h_scroll -= get_cwidth(line[0][1])
                skipped += 1
                del line[:1]  # Remove first character.

            x -= h_scroll  # When scrolling over double width character,
            # this can end up being negative.

        # Align this line. (Note that this doesn't work well when we use
        # get_line_prefix and that function returns variable width prefixes.)
        if align == WindowAlign.CENTER:
            line_width = fragment_list_width(line)
            if line_width < width:
                x += (width - line_width) // 2
        elif align == WindowAlign.RIGHT:
            line_width = fragment_list_width(line)
            if line_width < width:
                x += width - line_width

        col = 0
        wrap_count = 0
        for style, text, *_ in line:
            new_buffer_row = new_buffer[y + ypos]

            # Remember raw VT escape sequences. (E.g. FinalTerm's
            # escape sequences.)
            if "[ZeroWidthEscape]" in style:
                new_screen.zero_width_escapes[y + ypos][x + xpos] += text
                continue

            for c in text:
                char = _CHAR_CACHE[c, style]
                char_width = char.width

                # Wrap when the line width is exceeded.
                if wrap_lines and x + char_width > width:
                    visible_line_to_row_col[y + 1] = (
                        lineno,
                        visible_line_to_row_col[y][1] + x,
                    )
                    y += 1
                    wrap_count += 1
                    x = 0

                    # Insert line prefix (continuation prompt).
                    if is_input and get_line_prefix:
                        prompt = to_formatted_text(
                            get_line_prefix(lineno, wrap_count)
                        )
                        x, y = copy_line(prompt, lineno, x, y, is_input=False)

                    new_buffer_row = new_buffer[y + ypos]

                    if y >= write_position.height:
                        return x, y  # Break out of all for loops.

                # Set character in screen and shift 'x'.
                if x >= 0 and y >= 0 and x < width:
                    new_buffer_row[x + xpos] = char

                    # When we print a multi width character, make sure
                    # to erase the neighbors positions in the screen.
                    # (The empty string if different from everything,
                    # so next redraw this cell will repaint anyway.)
                    if char_width > 1:
                        for i in range(1, char_width):
                            new_buffer_row[x + xpos + i] = empty_char

                    # If this is a zero width characters, then it's
                    # probably part of a decomposed unicode character.
                    # See: https://en.wikipedia.org/wiki/Unicode_equivalence
                    # Merge it in the previous cell.
                    elif char_width == 0:
                        # Handle all character widths. If the previous
                        # character is a multiwidth character, then
                        # merge it two positions back.
                        for pw in [2, 1]:  # Previous character width.
                            if (
                                x - pw >= 0
                                and new_buffer_row[x + xpos - pw].width == pw
                            ):
                                prev_char = new_buffer_row[x + xpos - pw]
                                char2 = _CHAR_CACHE[
                                    prev_char.char + c, prev_char.style
                                ]
                                new_buffer_row[x + xpos - pw] = char2

                    # Keep track of write position for each character.
                    current_rowcol_to_yx[lineno, col + skipped] = (
                        y + ypos,
                        x + xpos,
                    )

                col += 1
                x += char_width
        return x, y

    # Copy content, visible line by visible line.
    def copy() -> int:
        y = -vertical_scroll_2
        lineno = vertical_scroll

        while y < write_position.height and lineno < line_count:
            # Take the next line and copy it in the real screen.
            line = ui_content.get_line(lineno)

            visible_line_to_row_col[y] = (lineno, horizontal_scroll)

            # Copy margin and actual line.
            x = 0
            x, y = copy_line(line, lineno, x, y, is_input=True)

            lineno += 1
            y += 1
        return y

    copy()

    def cursor_pos_to_screen_pos(row: int, col: int) -> Point:
        "Translate row/col from UIContent to real Screen coordinates."
        try:
            y, x = rowcol_to_yx[row, col]
        except KeyError:
            # Normally this should never happen. (It is a bug, if it happens.)
            # But to be sure, return (0, 0)
            return Point(x=0, y=0)

            # raise ValueError(
            #     'Invalid position. row=%r col=%r, vertical_scroll=%r, '
            #     'horizontal_scroll=%r, height=%r' %
            #     (row, col, vertical_scroll, horizontal_scroll, write_position.height))
        else:
            return Point(x=x, y=y)

    # Set cursor and menu positions.
    if ui_content.cursor_position:
        screen_cursor_position = cursor_pos_to_screen_pos(
            ui_content.cursor_position.y, ui_content.cursor_position.x
        )

        if has_focus:
            new_screen.set_cursor_position(self, screen_cursor_position)

            if always_hide_cursor:
                new_screen.show_cursor = False
            else:
                new_screen.show_cursor = ui_content.show_cursor

            self._highlight_digraph(new_screen)

        if highlight_lines:
            self._highlight_cursorlines(
                new_screen,
                screen_cursor_position,
                xpos,
                ypos,
                width,
                write_position.height,
            )

    # Draw input characters from the input processor queue.
    if has_focus and ui_content.cursor_position:
        self._show_key_processor_key_buffer(new_screen)

    # Set menu position.
    if ui_content.menu_position:
        new_screen.set_menu_position(
            self,
            cursor_pos_to_screen_pos(
                ui_content.menu_position.y, ui_content.menu_position.x
            ),
        )

    # Update output screen height.
    new_screen.height = max(new_screen.height, ypos + write_position.height)

    return visible_line_to_row_col, rowcol_to_yx
def _fill_bg(
    self, screen: Screen, write_position: WritePosition, erase_bg: bool
) -> None:
    """
    Erase/fill the background of this window's region.

    Only does work when `erase_bg` is set (used for floats) or when an
    explicit fill character was configured via `self.char`.
    """
    # `self.char` may be a plain character or a callable producing one.
    fill_char: str | None = self.char() if callable(self.char) else self.char

    if not (erase_bg or fill_char):
        return

    char_obj = _CHAR_CACHE[fill_char or " ", ""]
    wp = write_position

    for y in range(wp.ypos, wp.ypos + wp.height):
        row = screen.data_buffer[y]
        for x in range(wp.xpos, wp.xpos + wp.width):
            row[x] = char_obj
def _apply_style(
    self, new_screen: Screen, write_position: WritePosition, parent_style: str
) -> None:
    """Apply `self.style` on top of the parent style to the whole region."""
    # Combine the inherited style with this window's own style string.
    own_style = to_str(self.style)
    new_screen.fill_area(
        write_position, style=f"{parent_style} {own_style}", after=False
    )

    # Tag the bottom row with the 'last-line' class, so that e.g. an
    # underline can be styled on the last line of the window.
    last_line_position = WritePosition(
        write_position.xpos,
        write_position.ypos + write_position.height - 1,
        write_position.width,
        1,
    )
    new_screen.fill_area(last_line_position, "class:last-line", after=True)
def _highlight_digraph(self, new_screen: Screen) -> None:
    """
    While in Vi digraph mode (or quoted insert), draw the digraph symbol
    underneath the cursor.
    """
    symbol = self._get_digraph_char()
    if not symbol:
        return

    pos = new_screen.get_cursor_position(self)
    new_screen.data_buffer[pos.y][pos.x] = _CHAR_CACHE[symbol, "class:digraph"]
def _show_key_processor_key_buffer(self, new_screen: Screen) -> None:
    """
    When the user is typing a key binding that consists of several keys,
    display the last pressed key if the user is in insert mode and the key
    is meaningful to be displayed.
    E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the
    first 'j' needs to be displayed in order to get some feedback.
    """
    app = get_app()
    key_buffer = app.key_processor.key_buffer

    if key_buffer and _in_insert_mode() and not app.is_done:
        # The textual data for the given key. (Can be a VT100 escape
        # sequence.)
        data = key_buffer[-1].data

        # Display only if this is a 1 cell width character.
        if get_cwidth(data) == 1:
            # Overwrite the cell at the cursor with the pending key.
            cpos = new_screen.get_cursor_position(self)
            new_screen.data_buffer[cpos.y][cpos.x] = _CHAR_CACHE[
                data, "class:partial-key-binding"
            ]
def _highlight_cursorlines(
    self, new_screen: Screen, cpos: Point, x: int, y: int, width: int, height: int
) -> None:
    """
    Highlight cursor row/column and any configured color columns.

    :param cpos: Cursor position, in absolute screen coordinates.
    :param x: Screen column of the window body's left edge.
    :param y: Screen row of the window body's top edge.
    :param width: Width of the window body.
    :param height: Height of the window body.
    """
    cursor_line_style = " class:cursor-line "
    cursor_column_style = " class:cursor-column "

    data_buffer = new_screen.data_buffer

    # Highlight cursor line.
    if self.cursorline():
        row = data_buffer[cpos.y]
        # Fix: iterate over a fresh variable. The previous code reused `x`
        # as the loop variable, which left `x` pointing at the last column
        # and shifted the color-column positions computed below whenever
        # the cursorline was enabled.
        for x2 in range(x, x + width):
            original_char = row[x2]
            row[x2] = _CHAR_CACHE[
                original_char.char, original_char.style + cursor_line_style
            ]

    # Highlight cursor column.
    if self.cursorcolumn():
        for y2 in range(y, y + height):
            row = data_buffer[y2]
            original_char = row[cpos.x]
            row[cpos.x] = _CHAR_CACHE[
                original_char.char, original_char.style + cursor_column_style
            ]

    # Highlight color columns. (`colorcolumns` may be a list or a callable
    # returning a list.)
    colorcolumns = self.colorcolumns
    if callable(colorcolumns):
        colorcolumns = colorcolumns()

    for cc in colorcolumns:
        assert isinstance(cc, ColorColumn)
        column = cc.position

        # NOTE(review): `column` is window-relative (`row[column + x]`),
        # so this visibility check looks like it should be `column < width`
        # — confirm before changing.
        if column < x + width:  # Only draw when visible.
            color_column_style = " " + cc.style

            for y2 in range(y, y + height):
                row = data_buffer[y2]
                original_char = row[column + x]
                row[column + x] = _CHAR_CACHE[
                    original_char.char, original_char.style + color_column_style
                ]
def _copy_margin(
    self,
    margin_content: UIContent,
    new_screen: Screen,
    write_position: WritePosition,
    move_x: int,
    width: int,
) -> None:
    """Copy a rendered margin into the screen, shifted `move_x` columns."""
    margin_write_position = WritePosition(
        write_position.xpos + move_x,
        write_position.ypos,
        width,
        write_position.height,
    )
    self._copy_body(margin_content, new_screen, margin_write_position, 0, width)
def _scroll(self, ui_content: UIContent, width: int, height: int) -> None:
    """
    Update the scroll offsets so that the cursor stays visible.

    Dispatch to the wrapping-aware or the plain implementation, depending
    on the `wrap_lines` filter.
    """
    if self.wrap_lines():
        self._scroll_when_linewrapping(ui_content, width, height)
    else:
        self._scroll_without_linewrapping(ui_content, width, height)
def _scroll_when_linewrapping(
    self, ui_content: UIContent, width: int, height: int
) -> None:
    """
    Scroll to make sure the cursor position is visible and that we maintain
    the requested scroll offset.

    Set `self.horizontal_scroll/vertical_scroll`.

    :param width: Width available to the content (margins excluded).
    :param height: Height of the window body.
    """
    scroll_offsets_bottom = self.scroll_offsets.bottom
    scroll_offsets_top = self.scroll_offsets.top

    # We don't have horizontal scrolling.
    self.horizontal_scroll = 0

    def get_line_height(lineno: int) -> int:
        # Height of a line, including the rows added by wrapping.
        return ui_content.get_height_for_line(lineno, width, self.get_line_prefix)

    # When there is no space, reset `vertical_scroll_2` to zero and abort.
    # This can happen if the margin is bigger than the window width.
    # Otherwise the text height will become "infinite" (a big number) and
    # the copy_line will spend a huge amount of iterations trying to render
    # nothing.
    if width <= 0:
        self.vertical_scroll = ui_content.cursor_position.y
        self.vertical_scroll_2 = 0
        return

    # If the current line consumes more than the whole window height,
    # then we have to scroll vertically inside this line. (We don't take
    # the scroll offsets into account for this.)
    # Also, ignore the scroll offsets in this case. Just set the vertical
    # scroll to this line.
    line_height = get_line_height(ui_content.cursor_position.y)
    if line_height > height - scroll_offsets_top:
        # Calculate the height of the text before the cursor (including
        # line prefixes).
        text_before_height = ui_content.get_height_for_line(
            ui_content.cursor_position.y,
            width,
            self.get_line_prefix,
            slice_stop=ui_content.cursor_position.x,
        )

        # Adjust scroll offset.
        self.vertical_scroll = ui_content.cursor_position.y
        self.vertical_scroll_2 = min(
            text_before_height - 1,  # Keep the cursor visible.
            line_height
            - height,  # Avoid blank lines at the bottom when scrolling up again.
            self.vertical_scroll_2,
        )
        self.vertical_scroll_2 = max(
            0, text_before_height - height, self.vertical_scroll_2
        )
        return
    else:
        self.vertical_scroll_2 = 0

    # Current line doesn't consume the whole height. Take scroll offsets into account.
    def get_min_vertical_scroll() -> int:
        # Make sure that the cursor line is not below the bottom.
        # (Calculate how many lines can be shown between the cursor line
        # and the bottom of the window.)
        used_height = 0
        prev_lineno = ui_content.cursor_position.y

        for lineno in range(ui_content.cursor_position.y, -1, -1):
            used_height += get_line_height(lineno)

            if used_height > height - scroll_offsets_bottom:
                return prev_lineno
            else:
                prev_lineno = lineno
        return 0

    def get_max_vertical_scroll() -> int:
        # Make sure that the cursor line is not above the top.
        prev_lineno = ui_content.cursor_position.y
        used_height = 0

        for lineno in range(ui_content.cursor_position.y - 1, -1, -1):
            used_height += get_line_height(lineno)

            if used_height > scroll_offsets_top:
                return prev_lineno
            else:
                prev_lineno = lineno
        return prev_lineno

    def get_topmost_visible() -> int:
        """
        Calculate the upper most line that can be visible, while the bottom
        is still visible. We should not allow scroll more than this if
        `allow_scroll_beyond_bottom` is false.
        """
        prev_lineno = ui_content.line_count - 1
        used_height = 0
        for lineno in range(ui_content.line_count - 1, -1, -1):
            used_height += get_line_height(lineno)
            if used_height > height:
                return prev_lineno
            else:
                prev_lineno = lineno
        return prev_lineno

    # Scroll vertically. (Make sure that the whole line which contains the
    # cursor is visible.
    topmost_visible = get_topmost_visible()

    # Note: the `min(topmost_visible, ...)` is to make sure that we
    # don't require scrolling up because of the bottom scroll offset,
    # when we are at the end of the document.
    self.vertical_scroll = max(
        self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())
    )
    self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())

    # Disallow scrolling beyond bottom?
    if not self.allow_scroll_beyond_bottom():
        self.vertical_scroll = min(self.vertical_scroll, topmost_visible)
def _scroll_without_linewrapping(
    self, ui_content: UIContent, width: int, height: int
) -> None:
    """
    Scroll to make sure the cursor position is visible and that we maintain
    the requested scroll offset.

    Set `self.horizontal_scroll/vertical_scroll`.

    :param width: Width available to the content (margins excluded).
    :param height: Height of the window body.
    """
    cursor_position = ui_content.cursor_position or Point(x=0, y=0)

    # Without line wrapping, we will never have to scroll vertically inside
    # a single line.
    self.vertical_scroll_2 = 0

    if ui_content.line_count == 0:
        self.vertical_scroll = 0
        self.horizontal_scroll = 0
        return
    else:
        current_line_text = fragment_list_to_text(
            ui_content.get_line(cursor_position.y)
        )

    def do_scroll(
        current_scroll: int,
        scroll_offset_start: int,
        scroll_offset_end: int,
        cursor_pos: int,
        window_size: int,
        content_size: int,
    ) -> int:
        "Scrolling algorithm. Used for both horizontal and vertical scrolling."
        # Calculate the scroll offset to apply.
        # This can obviously never be more than have the screen size. Also, when the
        # cursor appears at the top or bottom, we don't apply the offset.
        scroll_offset_start = int(
            min(scroll_offset_start, window_size / 2, cursor_pos)
        )
        scroll_offset_end = int(
            min(scroll_offset_end, window_size / 2, content_size - 1 - cursor_pos)
        )

        # Prevent negative scroll offsets.
        if current_scroll < 0:
            current_scroll = 0

        # Scroll back if we scrolled to much and there's still space to show more of the document.
        if (
            not self.allow_scroll_beyond_bottom()
            and current_scroll > content_size - window_size
        ):
            current_scroll = max(0, content_size - window_size)

        # Scroll up if cursor is before visible part.
        if current_scroll > cursor_pos - scroll_offset_start:
            current_scroll = max(0, cursor_pos - scroll_offset_start)

        # Scroll down if cursor is after visible part.
        if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:
            current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end

        return current_scroll

    # When a preferred scroll is given, take that first into account.
    if self.get_vertical_scroll:
        self.vertical_scroll = self.get_vertical_scroll(self)
        assert isinstance(self.vertical_scroll, int)
    if self.get_horizontal_scroll:
        self.horizontal_scroll = self.get_horizontal_scroll(self)
        assert isinstance(self.horizontal_scroll, int)

    # Update horizontal/vertical scroll to make sure that the cursor
    # remains visible.
    offsets = self.scroll_offsets

    self.vertical_scroll = do_scroll(
        current_scroll=self.vertical_scroll,
        scroll_offset_start=offsets.top,
        scroll_offset_end=offsets.bottom,
        cursor_pos=ui_content.cursor_position.y,
        window_size=height,
        content_size=ui_content.line_count,
    )

    # A line prefix (prompt/continuation) consumes horizontal space, so it
    # reduces the effective window width for horizontal scrolling.
    if self.get_line_prefix:
        current_line_prefix_width = fragment_list_width(
            to_formatted_text(self.get_line_prefix(ui_content.cursor_position.y, 0))
        )
    else:
        current_line_prefix_width = 0

    self.horizontal_scroll = do_scroll(
        current_scroll=self.horizontal_scroll,
        scroll_offset_start=offsets.left,
        scroll_offset_end=offsets.right,
        cursor_pos=get_cwidth(current_line_text[: ui_content.cursor_position.x]),
        window_size=width - current_line_prefix_width,
        # We can only analyze the current line. Calculating the width off
        # all the lines is too expensive.
        content_size=max(
            get_cwidth(current_line_text), self.horizontal_scroll + width
        ),
    )
def _mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
    """
    Fallback mouse handler, called when the UI control didn't handle the
    event itself. Only scroll wheel events are handled here.

    Return `NotImplemented` when nothing was done (so no UI invalidation
    is required).
    """
    scroll_actions = {
        MouseEventType.SCROLL_DOWN: self._scroll_down,
        MouseEventType.SCROLL_UP: self._scroll_up,
    }

    action = scroll_actions.get(mouse_event.event_type)
    if action is None:
        return NotImplemented

    action()
    return None
def _scroll_down(self) -> None:
    "Scroll window down."
    info = self.render_info

    # Nothing rendered yet; no scrolling possible.
    if info is None:
        return

    if self.vertical_scroll < info.content_height - info.window_height:
        # If the cursor would end up inside the top scroll offset, move it
        # down along with the viewport.
        if info.cursor_position.y <= info.configured_scroll_offsets.top:
            self.content.move_cursor_down()

        self.vertical_scroll += 1
def _scroll_up(self) -> None:
    "Scroll window up."
    info = self.render_info

    # Nothing rendered yet; no scrolling possible.
    if info is None:
        return

    if info.vertical_scroll > 0:
        # TODO: not entirely correct yet in case of line wrapping and long lines.
        # If the cursor would end up inside the bottom scroll offset, move
        # it up along with the viewport.
        if (
            info.cursor_position.y
            >= info.window_height - 1 - info.configured_scroll_offsets.bottom
        ):
            self.content.move_cursor_up()

        self.vertical_scroll -= 1
def get_key_bindings(self) -> KeyBindingsBase | None:
return self.content.get_key_bindings()
def get_children(self) -> list[Container]:
return []
| Window |
python | getsentry__sentry | src/sentry/monitors/types.py | {
"start": 433,
"end": 484
} | class ____(TypedDict):
trace_id: str
| CheckinTrace |
python | pytorch__pytorch | torch/distributed/tensor/debug/_comm_mode.py | {
"start": 2189,
"end": 7938
} | class ____(ModTracker):
"""
Inherits ModuleTracker and expands on its functionality to track the
parameters and sharding information of a model at a module-level
"""
def __init__(self):
super().__init__()
self.module_helper_dict = {}
self.module_parameters_dict = {}
self.module_parents_dict = {}
self.register_forward_hook_handles = {}
self.parent_dict = {}
self.parent_list = []
self.sharding_dict = {}
self.activation_checkpointing = False
self.name = ""
def _fw_set_module_hook(self, mod, input, output):
"""
Updates the current module after module finishes running and
all other hooks are resolved
"""
if self.is_bw:
self.activation_checkpointing = True
else:
self.activation_checkpointing = False
if not self.activation_checkpointing:
# module is no longer parent of next modules
self.parent_list.pop()
# set current module to previous parent module
self.name = self.parent_list[-1]
def _fw_pre_hook(self, mod, input):
"""
This function is called before the forward pass of a module. It
collects the parameters and sharding information of a module and
stores it in a dictionary.
"""
if self.is_bw:
self.activation_checkpointing = True
else:
self.activation_checkpointing = False
self.name = super()._get_mod_name(mod)
w_mod = weakref.ref(mod)
# adds current sub-module to module tracker parent class
super()._get_append_fn(w_mod, self.name, False)()
args, _ = tree_flatten(input)
tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
if not self.is_bw and tensors:
register_multi_grad_hook(
tensors, super()._get_pop_fn(w_mod, self.name, True)
)
if not self.activation_checkpointing:
# contains information about module ordering and depth in the module tree
if self.name not in self.module_helper_dict:
self.module_helper_dict[self.name] = {}
self.module_helper_dict[self.name]["module_type"] = (
str(type(mod)).replace("<", "").replace(">", "")
)
self.module_helper_dict[self.name]["depth"] = len(self.parents) - 1
for param_name, param in mod.named_parameters(recurse=False):
if self.name not in self.module_parameters_dict:
self.module_parameters_dict[self.name] = {}
self.module_parameters_dict[self.name][param_name] = param.data
if isinstance(param.data, DTensor):
key_name = self.name + "." + param_name
self.sharding_dict[key_name] = param.data.placements
if "parameters" not in self.module_helper_dict[self.name]:
self.module_helper_dict[self.name]["parameters"] = {}
self.module_helper_dict[self.name]["parameters"][param_name] = str(
param.data.placements
)
# used to store module's parents to ensure correctness in backward pass/checkpointing
if self.name not in self.module_parents_dict:
self.module_parents_dict[self.name] = copy.deepcopy(self.parents)
# used to create parent-child module associations for json dumps
parent = self.parent_list[-1]
if parent not in self.parent_dict:
self.parent_dict[parent] = []
self.parent_dict[parent].append(self.name)
self.parent_list.append(self.name)
self.register_forward_hook_handles[self.name] = mod.register_forward_hook(
self._fw_set_module_hook
)
def _fw_post_hook(self, mod, input, output):
"""
This function is called when the forward pass of a module is called.
It updates the module tracker and removes the module from parent data
"""
super()._fw_post_hook(mod, input, output)
def _bw_hook(self, mod, output):
"""
This function is called when the backward pass of a module is called. It
updates the current module for backward passes
"""
self.activation_checkpointing = False
self.name = super()._get_mod_name(mod)
def __enter__(self):
self.activation_checkpointing = False
self.module_parameters_dict.clear()
self.sharding_dict.clear()
self.parent_dict.clear()
self.parent_list = ["Global"]
self.module_helper_dict.clear()
self.module_helper_dict["Global"] = {"depth": 0}
self.module_parents_dict.clear()
self.module_parents_dict["Global"] = set()
self._fw_pre_handle = register_module_forward_pre_hook(self._fw_pre_hook)
self._fw_post_handle = register_module_forward_hook(self._fw_post_hook)
self.register_forward_hook_handles.clear()
self._bw_handle = register_module_full_backward_pre_hook(self._bw_hook)
self.name = "Global"
def __exit__(self, *args):
super().__exit__(*args)
self._bw_handle.remove()
# removes all forward_hook handles added in the pre-hook
for handle in self.register_forward_hook_handles.values():
handle.remove()
def print_paramater_info(self):
print(self.module_parameters_dict)
def print_sharding_info(self):
for key, value in self.sharding_dict.items():
print(key + ": " + str(value))
| _CommModeModuleTracker |
python | numba__numba | numba/core/types/containers.py | {
"start": 5359,
"end": 5802
} | class ____(Sequence, BaseTuple):
@property
def iterator_type(self):
return UniTupleIter(self)
def __getitem__(self, i):
"""
Return element at position i
"""
return self.dtype
def __iter__(self):
return iter([self.dtype] * self.count)
def __len__(self):
return self.count
@property
def types(self):
return (self.dtype,) * self.count
| _HomogeneousTuple |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 21398,
"end": 21499
} | class ____(StateMachineEvent):
worker: str
__slots__ = ("worker",)
@dataclass
| RemoveWorkerEvent |
python | getsentry__sentry | src/sentry/users/api/serializers/user.py | {
"start": 1938,
"end": 2189
} | class ____(int, Enum):
DEFAULT = int(StacktraceOrder.DEFAULT) # Equivalent to `MOST_RECENT_FIRST`
MOST_RECENT_LAST = int(StacktraceOrder.MOST_RECENT_LAST)
MOST_RECENT_FIRST = int(StacktraceOrder.MOST_RECENT_FIRST)
| _SerializedStacktraceOrder |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/_hoverlabel.py | {
"start": 233,
"end": 11319
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour"
_path_str = "histogram2dcontour.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.histogram2dcontour.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | getsentry__sentry | src/sentry/tsdb/snuba.py | {
"start": 2508,
"end": 34571
} | class ____(BaseTSDB):
"""
A time series query interface to Snuba
Write methods are not supported, as the raw data from which we generate our
time series is assumed to already exist in snuba.
Read methods are supported only for models based on group/event data and
will return empty results for unsupported models.
"""
# ``project_filter_model_query_settings`` and ``outcomes_partial_query_settings`` are all the TSDB models for
# outcomes
project_filter_model_query_settings = {
model: SnubaModelQuerySettings(
Dataset.Outcomes,
"project_id",
"quantity",
[
["reason", "=", reason],
["outcome", "IN", TOTAL_RECEIVED_OUTCOMES],
OUTCOMES_CATEGORY_CONDITION,
],
)
for reason, model in FILTER_STAT_KEYS_TO_VALUES.items()
}
outcomes_partial_query_settings = {
TSDBModel.organization_total_received: SnubaModelQuerySettings(
Dataset.Outcomes,
"org_id",
"quantity",
[
["outcome", "IN", TOTAL_RECEIVED_OUTCOMES],
OUTCOMES_CATEGORY_CONDITION,
],
),
TSDBModel.organization_total_rejected: SnubaModelQuerySettings(
Dataset.Outcomes,
"org_id",
"quantity",
[["outcome", "=", outcomes.Outcome.RATE_LIMITED], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.organization_total_blacklisted: SnubaModelQuerySettings(
Dataset.Outcomes,
"org_id",
"quantity",
[["outcome", "=", outcomes.Outcome.FILTERED], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.project_total_received: SnubaModelQuerySettings(
Dataset.Outcomes,
"project_id",
"quantity",
[["outcome", "IN", TOTAL_RECEIVED_OUTCOMES], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.project_total_rejected: SnubaModelQuerySettings(
Dataset.Outcomes,
"project_id",
"quantity",
[["outcome", "=", outcomes.Outcome.RATE_LIMITED], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.project_total_blacklisted: SnubaModelQuerySettings(
Dataset.Outcomes,
"project_id",
"quantity",
[["outcome", "=", outcomes.Outcome.FILTERED], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.key_total_received: SnubaModelQuerySettings(
Dataset.Outcomes,
"key_id",
"quantity",
[["outcome", "IN", TOTAL_RECEIVED_OUTCOMES], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.key_total_rejected: SnubaModelQuerySettings(
Dataset.Outcomes,
"key_id",
"quantity",
[["outcome", "=", outcomes.Outcome.RATE_LIMITED], OUTCOMES_CATEGORY_CONDITION],
),
TSDBModel.key_total_blacklisted: SnubaModelQuerySettings(
Dataset.Outcomes,
"key_id",
"quantity",
[["outcome", "=", outcomes.Outcome.FILTERED], OUTCOMES_CATEGORY_CONDITION],
),
}
# ``non_outcomes_query_settings`` are all the query settings for non outcomes based TSDB models.
# Single tenant reads Snuba for these models, and writes to DummyTSDB. It reads and writes to Redis for all the
# other models.
# these query settings should use SnQL style parameters instead of the legacy format
non_outcomes_snql_query_settings = {
TSDBModel.project: SnubaModelQuerySettings(Dataset.Events, "project_id", None, []),
TSDBModel.group: SnubaModelQuerySettings(Dataset.Events, "group_id", None, []),
TSDBModel.release: SnubaModelQuerySettings(Dataset.Events, "release", None, []),
TSDBModel.users_affected_by_group: SnubaModelQuerySettings(
Dataset.Events, "group_id", "tags[sentry:user]", []
),
TSDBModel.users_affected_by_project: SnubaModelQuerySettings(
Dataset.Events, "project_id", "user", []
),
TSDBModel.frequent_environments_by_group: SnubaModelQuerySettings(
Dataset.Events, "group_id", "environment", []
),
TSDBModel.frequent_releases_by_group: SnubaModelQuerySettings(
Dataset.Events, "group_id", "release", []
),
TSDBModel.frequent_issues_by_project: SnubaModelQuerySettings(
Dataset.Events, "project_id", "group_id", []
),
TSDBModel.group_generic: SnubaModelQuerySettings(
Dataset.IssuePlatform,
"group_id",
None,
[],
None,
),
TSDBModel.users_affected_by_generic_group: SnubaModelQuerySettings(
Dataset.IssuePlatform,
"group_id",
"tags[sentry:user]",
[],
None,
),
}
# ``model_query_settings`` is a translation of TSDB models into required settings for querying snuba
model_query_settings = dict(
itertools.chain(
project_filter_model_query_settings.items(),
outcomes_partial_query_settings.items(),
non_outcomes_snql_query_settings.items(),
)
)
def __init__(self, **options):
super().__init__(**options)
def __manual_group_on_time_aggregation(self, rollup, time_column_alias) -> list[Any]:
"""
Explicitly builds an aggregation expression in-place of using a `TimeSeriesProcessor` on the snuba entity.
Older tables and queries that target that table had syntactic sugar on the `time` column and would apply
additional processing to re-write the query. For entities/models that don't have that special processing,
we need to manually insert the equivalent query to get the same result.
"""
def rollup_agg(rollup_granularity, alias):
if rollup_granularity == 60:
return ["toUnixTimestamp", [["toStartOfMinute", "timestamp"]], alias]
elif rollup_granularity == 3600:
return ["toUnixTimestamp", [["toStartOfHour", "timestamp"]], alias]
elif rollup_granularity == 3600 * 24:
return [
"toUnixTimestamp",
[["toDateTime", [["toDate", "timestamp"]]]],
time_column_alias,
]
else:
return None
# if we don't have an explicit function mapped to this rollup, we have to calculate it on the fly
# multiply(intDiv(toUInt32(toUnixTimestamp(timestamp)), granularity)))
synthetic_rollup = [
"multiply",
[["intDiv", [["toUInt32", [["toUnixTimestamp", "timestamp"]]], rollup]], rollup],
time_column_alias,
]
known_rollups = rollup_agg(rollup, time_column_alias)
return known_rollups if known_rollups else synthetic_rollup
def get_data(
self,
model,
keys,
start,
end,
rollup=None,
environment_ids=None,
aggregation="count()",
group_on_model=True,
group_on_time=False,
conditions=None,
use_cache=False,
jitter_value=None,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
):
if model in self.non_outcomes_snql_query_settings:
# no way around having to explicitly map legacy condition format to SnQL since this function
# is used everywhere that expects `conditions` to be legacy format
parsed_conditions = []
for cond in conditions or ():
if not is_condition(cond):
or_conditions = []
for or_cond in cond:
or_conditions.append(parse_condition(or_cond))
if len(or_conditions) > 1:
parsed_conditions.append(Or(or_conditions))
else:
parsed_conditions.extend(or_conditions)
else:
parsed_conditions.append(parse_condition(cond))
return self.__get_data_snql(
model,
keys,
start,
end,
rollup,
environment_ids,
"count" if aggregation == "count()" else aggregation,
group_on_model,
group_on_time,
parsed_conditions,
use_cache,
jitter_value,
manual_group_on_time=(
model in (TSDBModel.group_generic, TSDBModel.users_affected_by_generic_group)
),
is_grouprelease=(model == TSDBModel.frequent_releases_by_group),
tenant_ids=tenant_ids,
referrer_suffix=referrer_suffix,
)
else:
return self.__get_data_legacy(
model,
keys,
start,
end,
rollup,
environment_ids,
aggregation,
group_on_model,
group_on_time,
conditions,
use_cache,
jitter_value,
tenant_ids,
referrer_suffix,
)
def __get_data_snql(
self,
model: TSDBModel,
keys: Sequence | Set | Mapping,
start: datetime,
end: datetime | None,
rollup: int | None = None,
environment_ids: Sequence[int] | None = None,
aggregation: str = "count",
group_on_model: bool = True,
group_on_time: bool = False,
conditions: ConditionGroup | None = None,
use_cache: bool = False,
jitter_value: int | None = None,
manual_group_on_time: bool = False,
is_grouprelease: bool = False,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
):
"""
Similar to __get_data_legacy but uses the SnQL format. For future additions, prefer using this impl over
the legacy format.
"""
model_query_settings = self.model_query_settings.get(model)
if model_query_settings is None:
raise Exception(f"Unsupported TSDBModel: {model.name}")
model_group = model_query_settings.groupby
model_aggregate = model_query_settings.aggregate
model_dataset = model_query_settings.dataset
columns = (model_query_settings.groupby, model_query_settings.aggregate)
keys_map_tmp = dict(zip(columns, self.flatten_keys(keys)))
keys_map = {k: v for k, v in keys_map_tmp.items() if k is not None and v is not None}
if environment_ids is not None:
keys_map["environment"] = environment_ids
# For historical compatibility with bucket-counted TSDB implementations
# we grab the original bucketed series and add the rollup time to the
# timestamp of the last bucket to get the end time.
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
# If jitter_value is provided then we use it to offset the buckets we round start/end to by
# up to `rollup` seconds.
series = self._add_jitter_to_series(series, start, rollup, jitter_value)
groupby = []
if group_on_model and model_group is not None:
groupby.append(model_group)
if group_on_time:
groupby.append("time")
if aggregation == "count" and model_aggregate is not None:
# Special case, because count has different semantics, we change:
# `COUNT(model_aggregate)` to `COUNT() GROUP BY model_aggregate`
groupby.append(model_aggregate)
model_aggregate = None
aggregated_as = "aggregate"
if aggregation == "upsampled_count":
aggregations: list[SelectableExpression] = [
get_upsampled_count_snql_with_alias(aggregated_as)
]
else:
aggregations = [
Function(
function=aggregation,
parameters=[Column(model_aggregate)] if model_aggregate else [],
alias=aggregated_as,
)
]
if group_on_time and manual_group_on_time:
aggregations.append(manual_group_on_time_aggregation(rollup, "time"))
if keys:
start = to_datetime(series[0])
end = to_datetime(series[-1] + rollup)
limit = min(LIMIT, int(len(keys) * ((end - start).total_seconds() / rollup)))
# build up order by
orderby: list[OrderBy] = []
if group_on_time:
orderby.append(OrderBy(Column("time"), Direction.DESC))
if group_on_model and model_group is not None:
orderby.append(OrderBy(Column(model_group), Direction.ASC))
# build up where conditions
conditions = list(conditions) if conditions is not None else []
if model_query_settings.conditions is not None:
conditions += model_query_settings.conditions
project_ids = infer_project_ids_from_related_models(keys_map)
keys_map["project_id"] = project_ids
forward, reverse = get_snuba_translators(keys_map, is_grouprelease)
# resolve filter_key values to the right values environment.id -> environment.name, etc.
mapped_filter_conditions = []
for col, f_keys in forward(deepcopy(keys_map)).items():
if f_keys:
if len(f_keys) == 1 and None in f_keys:
mapped_filter_conditions.append(Condition(Column(col), Op.IS_NULL))
else:
mapped_filter_conditions.append(Condition(Column(col), Op.IN, f_keys))
where_conds = conditions + mapped_filter_conditions
if manual_group_on_time:
where_conds += [
Condition(Column("timestamp"), Op.GTE, start),
Condition(Column("timestamp"), Op.LT, end),
]
else:
time_column = get_required_time_column(model_dataset.value)
if time_column:
where_conds += [
Condition(Column(time_column), Op.GTE, start),
Condition(Column(time_column), Op.LT, end),
]
snql_request = Request(
dataset=model_dataset.value,
app_id="tsdb.get_data",
query=Query(
match=Entity(model_dataset.value),
select=list(
itertools.chain((model_query_settings.selected_columns or []), aggregations)
),
where=where_conds,
groupby=[Column(g) for g in groupby] if groupby else None,
orderby=orderby,
granularity=Granularity(rollup),
limit=Limit(limit),
),
tenant_ids=tenant_ids or dict(),
)
referrer = f"tsdb-modelid:{model.value}"
if referrer_suffix:
referrer += f".{referrer_suffix}"
query_result = raw_snql_query(snql_request, referrer, use_cache=use_cache)
if manual_group_on_time:
translated_results = {"data": query_result["data"]}
else:
translated_results = {"data": [reverse(d) for d in query_result["data"]]}
result = nest_groups(translated_results["data"], groupby, [aggregated_as])
else:
# don't bother querying snuba since we probably won't have the proper filter conditions to return
# reasonable data (invalid query)
result = {}
if group_on_time:
keys_map["time"] = series
self.zerofill(result, groupby, keys_map)
self.trim(result, groupby, keys)
if group_on_time and manual_group_on_time:
self.unnest(result, aggregated_as)
return result
else:
return result
def __get_data_legacy(
self,
model,
keys,
start,
end,
rollup=None,
environment_ids=None,
aggregation="count()",
group_on_model=True,
group_on_time=False,
conditions=None,
use_cache=False,
jitter_value=None,
tenant_ids=None,
referrer_suffix=None,
):
"""
Normalizes all the TSDB parameters and sends a query to snuba.
`group_on_time`: whether to add a GROUP BY clause on the 'time' field.
`group_on_model`: whether to add a GROUP BY clause on the primary model.
"""
# XXX: to counteract the hack in project_key_stats.py
if model in [
TSDBModel.key_total_received,
TSDBModel.key_total_blacklisted,
TSDBModel.key_total_rejected,
]:
keys = list(set(map(lambda x: int(x), keys)))
model_requires_manual_group_on_time = model in (
TSDBModel.group_generic,
TSDBModel.users_affected_by_generic_group,
)
group_on_time_column_alias = "grouped_time"
model_query_settings = self.model_query_settings.get(model)
if model_query_settings is None:
raise Exception(f"Unsupported TSDBModel: {model.name}")
model_group = model_query_settings.groupby
model_aggregate = model_query_settings.aggregate
# 10s is the only rollup under an hour that we support
if rollup == 10 and model_query_settings.dataset == Dataset.Outcomes:
model_dataset = Dataset.OutcomesRaw
else:
model_dataset = model_query_settings.dataset
groupby = []
if group_on_model and model_group is not None:
groupby.append(model_group)
if group_on_time:
if not model_requires_manual_group_on_time:
groupby.append("time")
else:
groupby.append(group_on_time_column_alias)
if aggregation == "count()" and model_aggregate is not None:
# Special case, because count has different semantics, we change:
# `COUNT(model_aggregate)` to `COUNT() GROUP BY model_aggregate`
groupby.append(model_aggregate)
model_aggregate = None
columns = (model_query_settings.groupby, model_query_settings.aggregate)
keys_map = dict(zip(columns, self.flatten_keys(keys)))
keys_map = {k: v for k, v in keys_map.items() if k is not None and v is not None}
if environment_ids is not None:
keys_map["environment"] = environment_ids
aggregated_as = "aggregate"
aggregations = [[aggregation, model_aggregate, aggregated_as]]
# For historical compatibility with bucket-counted TSDB implementations
# we grab the original bucketed series and add the rollup time to the
# timestamp of the last bucket to get the end time.
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
if group_on_time and model_requires_manual_group_on_time:
aggregations.append(
self.__manual_group_on_time_aggregation(rollup, group_on_time_column_alias)
)
# If jitter_value is provided then we use it to offset the buckets we round start/end to by
# up to `rollup` seconds.
series = self._add_jitter_to_series(series, start, rollup, jitter_value)
start = to_datetime(series[0])
end = to_datetime(series[-1] + rollup)
limit = min(LIMIT, int(len(keys) * ((end - start).total_seconds() / rollup)))
conditions = conditions if conditions is not None else []
if model_query_settings.conditions is not None:
conditions += deepcopy(model_query_settings.conditions)
# copy because we modify the conditions in snuba.query
orderby = []
if group_on_time:
if not model_requires_manual_group_on_time:
orderby.append("-time")
else:
orderby.append(f"-{group_on_time_column_alias}")
if group_on_model and model_group is not None:
orderby.append(model_group)
if keys:
referrer = f"tsdb-modelid:{model.value}"
if referrer_suffix:
referrer += f".{referrer_suffix}"
query_func_without_selected_columns = functools.partial(
snuba.query,
dataset=model_dataset,
start=start,
end=end,
groupby=groupby,
conditions=conditions,
filter_keys=keys_map,
aggregations=aggregations,
rollup=rollup,
limit=limit,
orderby=orderby,
referrer=referrer,
is_grouprelease=(model == TSDBModel.frequent_releases_by_group),
use_cache=use_cache,
tenant_ids=tenant_ids or dict(),
)
if model_query_settings.selected_columns:
result = query_func_without_selected_columns(
selected_columns=model_query_settings.selected_columns
)
self.unnest(result, aggregated_as)
else:
result = query_func_without_selected_columns()
else:
result = {}
if group_on_time:
if not model_requires_manual_group_on_time:
keys_map["time"] = series
else:
keys_map[group_on_time_column_alias] = series
self.zerofill(result, groupby, keys_map)
self.trim(result, groupby, keys)
if group_on_time and model_requires_manual_group_on_time:
# unroll aggregated data
self.unnest(result, aggregated_as)
return result
else:
return result
def zerofill(self, result, groups, flat_keys):
"""
Fills in missing keys in the nested result with zeroes.
`result` is the nested result
`groups` is the order in which the result is nested, eg: ['project', 'time']
`flat_keys` is a map from groups to lists of required keys for that group.
eg: {'project': [1,2]}
"""
if len(groups) > 0:
group, subgroups = groups[0], groups[1:]
# Zerofill missing keys
for k in flat_keys[group]:
if k not in result:
result[k] = 0 if len(groups) == 1 else {}
if subgroups:
for v in result.values():
self.zerofill(v, subgroups, flat_keys)
def trim(self, result, groups, keys):
"""
Similar to zerofill, but removes keys that should not exist.
Uses the non-flattened version of keys, so that different sets
of keys can exist in different branches at the same nesting level.
"""
if len(groups) > 0:
group, subgroups = groups[0], groups[1:]
if isinstance(result, dict):
for rk in list(result.keys()):
if group == "time": # Skip over time group
self.trim(result[rk], subgroups, keys)
elif rk in keys:
if isinstance(keys, dict):
self.trim(result[rk], subgroups, keys[rk])
else:
del result[rk]
def unnest(self, result, aggregated_as):
"""
Unnests the aggregated value in results and places it one level higher to conform to the
proper result format
convert:
{
"groupby[0]:value1" : {
"groupby[1]:value1" : {
"groupby[2]:value1" : {
"groupby[0]": groupby[0]:value1
"groupby[1]": groupby[1]:value1
"aggregation_as": aggregated_value
}
}
},
},
to:
{
"groupby[0]:value1": {
"groupby[1]:value1" : {
"groupby[2]:value1" : aggregated_value
}
},
}, ...
"""
from collections.abc import MutableMapping
if isinstance(result, MutableMapping):
for key, val in result.items():
if isinstance(val, MutableMapping):
if val.get(aggregated_as):
result[key] = val.get(aggregated_as)
else:
self.unnest(val, aggregated_as)
def get_aggregate_function(self, model) -> str:
model_query_settings = self.model_query_settings.get(model)
assert model_query_settings is not None, f"Unsupported TSDBModel: {model.name}"
if model_query_settings.dataset == Dataset.Outcomes:
aggregate_function = "sum"
else:
aggregate_function = "count()"
return aggregate_function
def get_sums_data(
self,
model: TSDBModel,
keys: Sequence[TSDBKey],
start: datetime,
end: datetime,
rollup: int | None = None,
environment_ids: Sequence[int] | None = None,
conditions=None,
use_cache: bool = False,
jitter_value: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
group_on_time: bool = True,
project_ids: Sequence[int] | None = None,
) -> Mapping[TSDBKey, int]:
aggregation = self.get_aggregate_function(model)
if self._should_use_upsampled_aggregation(model, project_ids):
aggregation = "upsampled_count"
result: Mapping[TSDBKey, int] = self.get_data(
model,
keys,
start,
end,
rollup,
environment_ids,
aggregation=aggregation,
group_on_time=group_on_time,
conditions=conditions,
use_cache=use_cache,
jitter_value=jitter_value,
tenant_ids=tenant_ids,
referrer_suffix=referrer_suffix,
)
return result
def _should_use_upsampled_aggregation(
self, model: TSDBModel, project_ids: Sequence[int] | None
) -> bool:
"""Check if we should use upsampled aggregation based on model and project allowlist."""
# Only apply to error models
error_model = get_issue_tsdb_group_model(GroupCategory.ERROR)
if model != error_model:
return False
# Check if any projects are in upsampling allowlist
if not project_ids:
return False
try:
return are_any_projects_error_upsampled(list(project_ids))
except Exception:
return False
def get_range(
self,
model: TSDBModel,
keys: Sequence[TSDBKey],
start: datetime,
end: datetime,
rollup: int | None = None,
environment_ids: Sequence[int] | None = None,
conditions=None,
use_cache: bool = False,
jitter_value: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
group_on_time: bool = True,
aggregation_override: str | None = None,
project_ids: Sequence[int] | None = None,
) -> dict[TSDBKey, list[tuple[int, int]]]:
if aggregation_override:
aggregation = aggregation_override
else:
aggregation = self.get_aggregate_function(model)
if self._should_use_upsampled_aggregation(model, project_ids):
aggregation = "upsampled_count"
result = self.get_data(
model,
keys,
start,
end,
rollup,
environment_ids,
aggregation=aggregation,
group_on_time=True,
conditions=conditions,
use_cache=use_cache,
jitter_value=jitter_value,
tenant_ids=tenant_ids,
referrer_suffix=referrer_suffix,
)
# convert
# {group:{timestamp:count, ...}}
# into
# {group: [(timestamp, count), ...]}
return {k: sorted(result[k].items()) for k in result}
def get_distinct_counts_series(
self,
model,
keys: Sequence[TSDBKey],
start,
end=None,
rollup=None,
environment_id=None,
tenant_ids=None,
project_ids: Sequence[int] | None = None,
):
result = self.get_data(
model,
keys,
start,
end,
rollup,
[environment_id] if environment_id is not None else None,
aggregation="uniq",
group_on_time=True,
tenant_ids=tenant_ids,
)
# convert
# {group:{timestamp:count, ...}}
# into
# {group: [(timestamp, count), ...]}
return {k: sorted(result[k].items()) for k in result}
def get_distinct_counts_totals(
self,
model,
keys: Sequence[TSDBKey],
start,
end=None,
rollup=None,
environment_id=None,
use_cache=False,
jitter_value=None,
tenant_ids=None,
referrer_suffix=None,
conditions=None,
group_on_time: bool = False,
project_ids: Sequence[int] | None = None,
) -> Mapping[TSDBKey, int]:
return self.get_data(
model,
keys,
start,
end,
rollup,
[environment_id] if environment_id is not None else None,
aggregation="uniq",
use_cache=use_cache,
jitter_value=jitter_value,
tenant_ids=tenant_ids,
referrer_suffix=referrer_suffix,
conditions=conditions,
group_on_time=group_on_time,
)
def get_frequency_series(
self,
model: TSDBModel,
items: Mapping[TSDBKey, Sequence[TSDBItem]],
start: datetime,
end: datetime | None = None,
rollup: int | None = None,
environment_id: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
project_ids: Sequence[int] | None = None,
) -> dict[TSDBKey, list[tuple[float, dict[TSDBItem, float]]]]:
result = self.get_data(
model,
items,
start,
end,
rollup,
[environment_id] if environment_id is not None else None,
aggregation="count()",
group_on_time=True,
tenant_ids=tenant_ids,
)
# convert
# {group:{timestamp:{agg:count}}}
# into
# {group: [(timestamp, {agg: count, ...}), ...]}
return {k: sorted(result[k].items()) for k in result}
def flatten_keys(self, items: Mapping | Sequence | Set) -> tuple[list, Sequence | None]:
"""
Returns a normalized set of keys based on the various formats accepted
by TSDB methods. The input is either just a plain list of keys for the
top level or a `{level1_key: [level2_key, ...]}` dictionary->list map.
The output is a 2-tuple of ([level_1_keys], [all_level_2_keys])
"""
if isinstance(items, Mapping):
return (
list(items.keys()),
list(set.union(*(set(v) for v in items.values())) if items else []),
)
elif isinstance(items, (Sequence, Set)):
return (list(items), None)
else:
raise ValueError("Unsupported type: %s" % (type(items)))
| SnubaTSDB |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/component/select_menu.py | {
"start": 661,
"end": 1333
} | class ____:
"""
An option for a DiscordSelectMenu.
"""
def __init__(
self, label: str, value: str, description: str | None = None, default: bool = False
) -> None:
self.label = label
self.value = value
self.description = description
self.default = default
def build(self) -> DiscordSelectMenuOptionDict:
option = DiscordSelectMenuOptionDict(label=self.label, value=self.value)
if self.description is not None:
option["description"] = self.description
if self.default is not None:
option["default"] = self.default
return option
| DiscordSelectMenuOption |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 17932,
"end": 18012
} | class ____(TrackedAbstractBaseA, UntrackedConcreteBase):
pass
| InheritTracking1 |
python | fluentpython__example-code | 20-descriptor/bulkfood/model_v5_check.py | {
"start": 13,
"end": 509
} | class ____:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.storage_name)
def __set__(self, instance, value):
setattr(instance, self.storage_name, value)
| AutoStorage |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_default_date_format01.py | {
"start": 346,
"end": 2979
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_date_format01.xlsx")
def test_create_file_user_date_format(self):
"""Test write_datetime with explicit date format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
format1 = workbook.add_format({"num_format": "yyyy\\-mm\\-dd"})
date1 = datetime.strptime("2013-07-25", "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1, format1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format(self):
"""Test write_datetime with default date format."""
workbook = Workbook(
self.got_filename, {"default_date_format": "yyyy\\-mm\\-dd"}
)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime("2013-07-25", "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write(self):
"""Test write_datetime with default date format."""
workbook = Workbook(
self.got_filename, {"default_date_format": "yyyy\\-mm\\-dd"}
)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime("2013-07-25", "%Y-%m-%d")
worksheet.write("A1", date1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write_row(self):
"""Test write_row with default date format."""
workbook = Workbook(
self.got_filename, {"default_date_format": "yyyy\\-mm\\-dd"}
)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime("2013-07-25", "%Y-%m-%d")
worksheet.write_row("A1", [date1])
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write_column(self):
"""Test write_column with default date format."""
workbook = Workbook(
self.got_filename, {"default_date_format": "yyyy\\-mm\\-dd"}
)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime("2013-07-25", "%Y-%m-%d")
worksheet.write_column(0, 0, [date1])
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datastore.py | {
"start": 23413,
"end": 25286
} | class ____(GoogleCloudBaseOperator):
"""
Deletes the long-running operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreDeleteOperationOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_operation(name=self.name)
| CloudDatastoreDeleteOperationOperator |
python | pydata__xarray | xarray/tests/test_duck_array_ops.py | {
"start": 2132,
"end": 7852
} | class ____:
@pytest.fixture(autouse=True)
def setUp(self):
self.x = array(
[
[
[nan, nan, 2.0, nan],
[nan, 5.0, 6.0, nan],
[8.0, 9.0, 10.0, nan],
],
[
[nan, 13.0, 14.0, 15.0],
[nan, 17.0, 18.0, nan],
[nan, 21.0, nan, nan],
],
]
)
def test_first(self):
expected_results = [
array([[nan, 13, 2, 15], [nan, 5, 6, nan], [8, 9, 10, nan]]),
array([[8, 5, 2, nan], [nan, 13, 14, 15]]),
array([[2, 5, 8], [13, 17, 21]]),
]
for axis, expected in zip(
[0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True
):
actual = first(self.x, axis)
assert_array_equal(expected, actual)
expected = self.x[0]
actual = first(self.x, axis=0, skipna=False)
assert_array_equal(expected, actual)
expected = self.x[..., 0]
actual = first(self.x, axis=-1, skipna=False)
assert_array_equal(expected, actual)
with pytest.raises(IndexError, match=r"out of bounds"):
first(self.x, 3)
def test_last(self):
expected_results = [
array([[nan, 13, 14, 15], [nan, 17, 18, nan], [8, 21, 10, nan]]),
array([[8, 9, 10, nan], [nan, 21, 18, 15]]),
array([[2, 6, 10], [15, 18, 21]]),
]
for axis, expected in zip(
[0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True
):
actual = last(self.x, axis)
assert_array_equal(expected, actual)
expected = self.x[-1]
actual = last(self.x, axis=0, skipna=False)
assert_array_equal(expected, actual)
expected = self.x[..., -1]
actual = last(self.x, axis=-1, skipna=False)
assert_array_equal(expected, actual)
with pytest.raises(IndexError, match=r"out of bounds"):
last(self.x, 3)
def test_count(self):
assert 12 == count(self.x)
expected = array([[1, 2, 3], [3, 2, 1]])
assert_array_equal(expected, count(self.x, axis=-1))
assert 1 == count(np.datetime64("2000-01-01"))
def test_where_type_promotion(self):
result = where(np.array([True, False]), np.array([1, 2]), np.array(["a", "b"]))
assert_array_equal(result, np.array([1, "b"], dtype=object))
result = where([True, False], np.array([1, 2], np.float32), np.nan)
assert result.dtype == np.float32
assert_array_equal(result, np.array([1, np.nan], dtype=np.float32))
def test_where_extension_duck_array(self, categorical1, categorical2):
where_res = where(
np.array([True, False, True, False, False]),
PandasExtensionArray(categorical1),
PandasExtensionArray(categorical2),
)
assert isinstance(where_res, PandasExtensionArray)
assert (
where_res == pd.Categorical(["cat1", "cat1", "cat2", "cat3", "cat1"])
).all()
def test_concatenate_extension_duck_array(self, categorical1, categorical2):
concate_res = concatenate(
[PandasExtensionArray(categorical1), PandasExtensionArray(categorical2)]
)
assert isinstance(concate_res, PandasExtensionArray)
assert (
concate_res
== type(categorical1)._concat_same_type((categorical1, categorical2))
).all()
@requires_pyarrow
def test_extension_array_pyarrow_concatenate(self, arrow1, arrow2):
concatenated = concatenate(
(PandasExtensionArray(arrow1), PandasExtensionArray(arrow2))
)
assert concatenated[2].array[0]["x"] == 3
assert concatenated[3].array[0]["y"]
@requires_pyarrow
def test_extension_array_copy_arrow_type(self):
arr = pd.array([pd.NA, 1, 2], dtype="int64[pyarrow]")
# Relying on the `__getattr__` of `PandasExtensionArray` to do the deep copy
# recursively only fails for `int64[pyarrow]` and similar types so this
# test ensures that copying still works there.
assert isinstance(
copy.deepcopy(PandasExtensionArray(arr), memo=None).array, type(arr)
)
def test___getitem__extension_duck_array(self, categorical1):
extension_duck_array = PandasExtensionArray(categorical1)
assert (extension_duck_array[0:2] == categorical1[0:2]).all()
assert isinstance(extension_duck_array[0:2], PandasExtensionArray)
assert extension_duck_array[0] == categorical1[0]
assert isinstance(extension_duck_array[0], PandasExtensionArray)
mask = [True, False, True, False, True]
assert (extension_duck_array[mask] == categorical1[mask]).all()
def test__setitem__extension_duck_array(self, categorical1):
extension_duck_array = PandasExtensionArray(categorical1)
extension_duck_array[2] = "cat1" # already existing category
assert extension_duck_array[2] == "cat1"
with pytest.raises(TypeError, match="Cannot setitem on a Categorical"):
extension_duck_array[2] = "cat4" # new category
def test_stack_type_promotion(self):
result = stack([1, "b"])
assert_array_equal(result, np.array([1, "b"], dtype=object))
def test_concatenate_type_promotion(self):
result = concatenate([np.array([1]), np.array(["b"])])
assert_array_equal(result, np.array([1, "b"], dtype=object))
@pytest.mark.filterwarnings("error")
def test_all_nan_arrays(self):
assert np.isnan(mean([np.nan, np.nan]))
@requires_dask
| TestOps |
python | urllib3__urllib3 | test/test_response.py | {
"start": 56368,
"end": 56591
} | class ____(MockChunkedEncodingResponse):
BAD_LENGTH_LINE = "ZZZ\r\n"
def _encode_chunk(self, chunk: bytes) -> bytes:
return f"{self.BAD_LENGTH_LINE}{chunk.decode()}\r\n".encode()
| MockChunkedInvalidChunkLength |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 18133,
"end": 18337
} | class ____(BooleanOption, Flag, metaclass=OptionType):
"""``auto`` option to polynomial manipulation functions. """
option = 'frac'
@classmethod
def default(cls):
return False
| Frac |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_slugs.py | {
"start": 2808,
"end": 3443
} | class ____(util.MdCase):
"""Test encoded GitHub Flavored Markdown style slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.slugify(case="lower-ascii", percent_encode=True)
}
}
def test_slug(self):
"""Test the slug output."""
self.check_markdown(
r'# Testing GFM unicode-slugs_headers ±♠Ωℑ with encoding',
r'<h1 id="testing-gfm-unicode-slugs_headers-%CE%A9%E2%84%91-with-encoding">'
r'Testing GFM unicode-slugs_headers ±♠Ωℑ with encoding</h1>'
)
| TestGFMEncoded |
python | RaRe-Technologies__gensim | gensim/test/test_similarities.py | {
"start": 22952,
"end": 26835
} | class ____(unittest.TestCase):
def setUp(self):
try:
import annoy # noqa:F401
except ImportError as e:
raise unittest.SkipTest("Annoy library is not available: %s" % e)
from gensim.similarities.annoy import AnnoyIndexer
self.indexer = AnnoyIndexer
def test_word2vec(self):
model = word2vec.Word2Vec(TEXTS, min_count=1)
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_fast_text(self):
class LeeReader:
def __init__(self, fn):
self.fn = fn
def __iter__(self):
with utils.open(self.fn, 'r', encoding="latin_1") as infile:
for line in infile:
yield line.lower().strip().split()
model = FastText(LeeReader(datapath('lee.cor')), bucket=5000)
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_annoy_indexing_of_keyed_vectors(self):
from gensim.similarities.annoy import AnnoyIndexer
keyVectors_file = datapath('lee_fasttext.vec')
model = KeyedVectors.load_word2vec_format(keyVectors_file)
index = AnnoyIndexer(model, 10)
self.assertEqual(index.num_trees, 10)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, model, index)
def test_load_missing_raises_error(self):
from gensim.similarities.annoy import AnnoyIndexer
test_index = AnnoyIndexer()
self.assertRaises(IOError, test_index.load, fname='test-index')
def assertVectorIsSimilarToItself(self, wv, index):
vector = wv.get_normed_vectors()[0]
label = wv.index_to_key[0]
approx_neighbors = index.most_similar(vector, 1)
word, similarity = approx_neighbors[0]
self.assertEqual(word, label)
self.assertAlmostEqual(similarity, 1.0, places=2)
def assertApproxNeighborsMatchExact(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
exact_neighbors = model.most_similar(positive=[vector], topn=5)
approx_words = [neighbor[0] for neighbor in approx_neighbors]
exact_words = [neighbor[0] for neighbor in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def assertAllSimilaritiesDisableIndexer(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_similarities = model.most_similar([vector], topn=None, indexer=index)
exact_similarities = model.most_similar(positive=[vector], topn=None)
self.assertEqual(approx_similarities, exact_similarities)
self.assertEqual(len(approx_similarities), len(wv.vectors))
def assertIndexSaved(self, index):
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def assertLoadedIndexEqual(self, index, model):
from gensim.similarities.annoy import AnnoyIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
index2 = AnnoyIndexer()
index2.load(fname)
index2.model = model
self.assertEqual(index.index.f, index2.index.f)
self.assertEqual(index.labels, index2.labels)
self.assertEqual(index.num_trees, index2.num_trees)
| TestWord2VecAnnoyIndexer |
python | google__python-fire | fire/core.py | {
"start": 7053,
"end": 36731
} | class ____(SystemExit): # pylint: disable=g-bad-exception-name
"""An exception raised by Fire to the client in the case of a FireError.
The trace of the Fire program is available on the `trace` property.
This exception inherits from SystemExit, so clients may explicitly catch it
with `except SystemExit` or `except FireExit`. If not caught, this exception
will cause the client program to exit without a stacktrace.
"""
def __init__(self, code, component_trace):
"""Constructs a FireExit exception.
Args:
code: (int) Exit code for the Fire CLI.
component_trace: (FireTrace) The trace for the Fire command.
"""
super().__init__(code)
self.trace = component_trace
def _IsHelpShortcut(component_trace, remaining_args):
"""Determines if the user is trying to access help without '--' separator.
For example, mycmd.py --help instead of mycmd.py -- --help.
Args:
component_trace: (FireTrace) The trace for the Fire command.
remaining_args: List of remaining args that haven't been consumed yet.
Returns:
True if help is requested, False otherwise.
"""
show_help = False
if remaining_args:
target = remaining_args[0]
if target in ('-h', '--help'):
# Check if --help would be consumed as a keyword argument, or is a member.
component = component_trace.GetResult()
if inspect.isclass(component) or inspect.isroutine(component):
fn_spec = inspectutils.GetFullArgSpec(component)
_, remaining_kwargs, _ = _ParseKeywordArgs(remaining_args, fn_spec)
show_help = target in remaining_kwargs
else:
members = dict(inspect.getmembers(component))
show_help = target not in members
if show_help:
component_trace.show_help = True
command = f'{component_trace.GetCommand()} -- --help'
print(f'INFO: Showing help with the command {shlex.quote(command)}.\n',
file=sys.stderr)
return show_help
def _PrintResult(component_trace, verbose=False, serialize=None):
"""Prints the result of the Fire call to stdout in a human readable way."""
# TODO(dbieber): Design human readable deserializable serialization method
# and move serialization to its own module.
result = component_trace.GetResult()
# Allow users to modify the return value of the component and provide
# custom formatting.
if serialize:
if not callable(serialize):
raise FireError(
'The argument `serialize` must be empty or callable:', serialize)
result = serialize(result)
if value_types.HasCustomStr(result):
# If the object has a custom __str__ method, rather than one inherited from
# object, then we use that to serialize the object.
print(str(result))
return
if isinstance(result, (list, set, frozenset, types.GeneratorType)):
for i in result:
print(_OneLineResult(i))
elif inspect.isgeneratorfunction(result):
raise NotImplementedError
elif isinstance(result, dict) and value_types.IsSimpleGroup(result):
print(_DictAsString(result, verbose))
elif isinstance(result, tuple):
print(_OneLineResult(result))
elif isinstance(result, value_types.VALUE_TYPES):
if result is not None:
print(result)
else:
help_text = helptext.HelpText(
result, trace=component_trace, verbose=verbose)
output = [help_text]
Display(output, out=sys.stdout)
def _DisplayError(component_trace):
"""Prints the Fire trace and the error to stdout."""
result = component_trace.GetResult()
output = []
show_help = False
for help_flag in ('-h', '--help'):
if help_flag in component_trace.elements[-1].args:
show_help = True
if show_help:
command = f'{component_trace.GetCommand()} -- --help'
print(f'INFO: Showing help with the command {shlex.quote(command)}.\n',
file=sys.stderr)
help_text = helptext.HelpText(result, trace=component_trace,
verbose=component_trace.verbose)
output.append(help_text)
Display(output, out=sys.stderr)
else:
print(formatting.Error('ERROR: ')
+ component_trace.elements[-1].ErrorAsStr(),
file=sys.stderr)
error_text = helptext.UsageText(result, trace=component_trace,
verbose=component_trace.verbose)
print(error_text, file=sys.stderr)
def _DictAsString(result, verbose=False):
"""Returns a dict as a string.
Args:
result: The dict to convert to a string
verbose: Whether to include 'hidden' members, those keys starting with _.
Returns:
A string representing the dict
"""
# We need to do 2 iterations over the items in the result dict
# 1) Getting visible items and the longest key for output formatting
# 2) Actually construct the output lines
class_attrs = inspectutils.GetClassAttrsDict(result)
result_visible = {
key: value for key, value in result.items()
if completion.MemberVisible(result, key, value,
class_attrs=class_attrs, verbose=verbose)
}
if not result_visible:
return '{}'
longest_key = max(len(str(key)) for key in result_visible.keys())
format_string = f'{{key:{longest_key + 1}s}} {{value}}'
lines = []
for key, value in result.items():
if completion.MemberVisible(result, key, value, class_attrs=class_attrs,
verbose=verbose):
line = format_string.format(key=f'{key}:', value=_OneLineResult(value))
lines.append(line)
return '\n'.join(lines)
def _OneLineResult(result):
"""Returns result serialized to a single line string."""
# TODO(dbieber): Ensure line is fewer than eg 120 characters.
if isinstance(result, str):
return str(result).replace('\n', ' ')
# TODO(dbieber): Show a small amount of usage information about the function
# or module if it fits cleanly on the line.
if inspect.isfunction(result):
return f'<function {result.__name__}>'
if inspect.ismodule(result):
return f'<module {result.__name__}>'
try:
# Don't force conversion to ascii.
return json.dumps(result, ensure_ascii=False)
except (TypeError, ValueError):
return str(result).replace('\n', ' ')
def _Fire(component, args, parsed_flag_args, context, name=None):
"""Execute a Fire command on a target component using the args supplied.
Arguments that come after a final isolated '--' are treated as Flags, eg for
interactive mode or completion script generation.
Other arguments are consumed by the execution of the Fire command, eg in the
traversal of the members of the component, or in calling a function or
instantiating a class found during the traversal.
The steps performed by this method are:
1. Parse any Flag args (the args after the final --)
2. Start with component as the current component.
2a. If the current component is a class, instantiate it using args from args.
2b. If the component is a routine, call it using args from args.
2c. If the component is a sequence, index into it using an arg from
args.
2d. If possible, access a member from the component using an arg from args.
2e. If the component is a callable object, call it using args from args.
2f. Repeat 2a-2e until no args remain.
Note: Only the first applicable rule from 2a-2e is applied in each iteration.
After each iteration of step 2a-2e, the current component is updated to be the
result of the applied rule.
3a. Embed into ipython REPL if interactive mode is selected.
3b. Generate a completion script if that flag is provided.
In step 2, arguments will only ever be consumed up to a separator; a single
step will never consume arguments from both sides of a separator.
The separator defaults to a hyphen (-), and can be overwritten with the
--separator Fire argument.
Args:
component: The target component for Fire.
args: A list of args to consume in Firing on the component, usually from
the command line.
parsed_flag_args: The values of the flag args (e.g. --verbose, --separator)
that are part of every Fire CLI.
context: A dict with the local and global variables available at the call
to Fire.
name: Optional. The name of the command. Used in interactive mode and in
the tab completion script.
Returns:
FireTrace of components starting with component, tracing Fire's execution
path as it consumes args.
Raises:
ValueError: If there are arguments that cannot be consumed.
ValueError: If --completion is specified but no name available.
"""
verbose = parsed_flag_args.verbose
interactive = parsed_flag_args.interactive
separator = parsed_flag_args.separator
show_completion = parsed_flag_args.completion
show_help = parsed_flag_args.help
show_trace = parsed_flag_args.trace
# component can be a module, class, routine, object, etc.
if component is None:
component = context
initial_component = component
component_trace = trace.FireTrace(
initial_component=initial_component, name=name, separator=separator,
verbose=verbose, show_help=show_help, show_trace=show_trace)
instance = None
remaining_args = args
while True:
last_component = component
initial_args = remaining_args
if not remaining_args and (show_help or interactive or show_trace
or show_completion is not None):
# Don't initialize the final class or call the final function unless
# there's a separator after it, and instead process the current component.
break
if _IsHelpShortcut(component_trace, remaining_args):
remaining_args = []
break
saved_args = []
used_separator = False
if separator in remaining_args:
# For the current component, only use arguments up to the separator.
separator_index = remaining_args.index(separator)
saved_args = remaining_args[separator_index + 1:]
remaining_args = remaining_args[:separator_index]
used_separator = True
assert separator not in remaining_args
handled = False
candidate_errors = []
is_callable = inspect.isclass(component) or inspect.isroutine(component)
is_callable_object = callable(component) and not is_callable
is_sequence = isinstance(component, (list, tuple))
is_map = isinstance(component, dict) or inspectutils.IsNamedTuple(component)
if not handled and is_callable:
# The component is a class or a routine; we'll try to initialize it or
# call it.
is_class = inspect.isclass(component)
try:
component, remaining_args = _CallAndUpdateTrace(
component,
remaining_args,
component_trace,
treatment='class' if is_class else 'routine',
target=component.__name__)
handled = True
except FireError as error:
candidate_errors.append((error, initial_args))
if handled and last_component is initial_component:
# If the initial component is a class, keep an instance for use with -i.
instance = component
if not handled and is_sequence and remaining_args:
# The component is a tuple or list; we'll try to access a member.
arg = remaining_args[0]
try:
index = int(arg)
component = component[index]
handled = True
except (ValueError, IndexError):
error = FireError(
'Unable to index into component with argument:', arg)
candidate_errors.append((error, initial_args))
if handled:
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, index, [arg], filename, lineno)
if not handled and is_map and remaining_args:
# The component is a dict or other key-value map; try to access a member.
target = remaining_args[0]
# Treat namedtuples as dicts when handling them as a map.
if inspectutils.IsNamedTuple(component):
component_dict = component._asdict()
else:
component_dict = component
if target in component_dict:
component = component_dict[target]
handled = True
elif target.replace('-', '_') in component_dict:
component = component_dict[target.replace('-', '_')]
handled = True
else:
# The target isn't present in the dict as a string key, but maybe it is
# a key as another type.
# TODO(dbieber): Consider alternatives for accessing non-string keys.
for key, value in (
component_dict.items()):
if target == str(key):
component = value
handled = True
break
if handled:
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, target, [target], filename, lineno)
else:
error = FireError('Cannot find key:', target)
candidate_errors.append((error, initial_args))
if not handled and remaining_args:
# Object handler. We'll try to access a member of the component.
try:
target = remaining_args[0]
component, consumed_args, remaining_args = _GetMember(
component, remaining_args)
handled = True
filename, lineno = inspectutils.GetFileAndLine(component)
component_trace.AddAccessedProperty(
component, target, consumed_args, filename, lineno)
except FireError as error:
# Couldn't access member.
candidate_errors.append((error, initial_args))
if not handled and is_callable_object:
# The component is a callable object; we'll try to call it.
try:
component, remaining_args = _CallAndUpdateTrace(
component,
remaining_args,
component_trace,
treatment='callable')
handled = True
except FireError as error:
candidate_errors.append((error, initial_args))
if not handled and candidate_errors:
error, initial_args = candidate_errors[0]
component_trace.AddError(error, initial_args)
return component_trace
if used_separator:
# Add back in the arguments from after the separator.
if remaining_args:
remaining_args = remaining_args + [separator] + saved_args
elif (inspect.isclass(last_component)
or inspect.isroutine(last_component)):
remaining_args = saved_args
component_trace.AddSeparator()
elif component is not last_component:
remaining_args = [separator] + saved_args
else:
# It was an unnecessary separator.
remaining_args = saved_args
if component is last_component and remaining_args == initial_args:
# We're making no progress.
break
if remaining_args:
component_trace.AddError(
FireError('Could not consume arguments:', remaining_args),
initial_args)
return component_trace
if show_completion is not None:
if name is None:
raise ValueError('Cannot make completion script without command name')
script = CompletionScript(name, initial_component, shell=show_completion)
component_trace.AddCompletionScript(script)
if interactive:
variables = context.copy()
if name is not None:
variables[name] = initial_component
variables['component'] = initial_component
variables['result'] = component
variables['trace'] = component_trace
if instance is not None:
variables['self'] = instance
interact.Embed(variables, verbose)
component_trace.AddInteractiveMode()
return component_trace
def _GetMember(component, args):
"""Returns a subcomponent of component by consuming an arg from args.
Given a starting component and args, this function gets a member from that
component, consuming one arg in the process.
Args:
component: The component from which to get a member.
args: Args from which to consume in the search for the next component.
Returns:
component: The component that was found by consuming an arg.
consumed_args: The args that were consumed by getting this member.
remaining_args: The remaining args that haven't been consumed yet.
Raises:
FireError: If we cannot consume an argument to get a member.
"""
members = dir(component)
arg = args[0]
arg_names = [
arg,
arg.replace('-', '_'), # treat '-' as '_'.
]
for arg_name in arg_names:
if arg_name in members:
return getattr(component, arg_name), [arg], args[1:]
raise FireError('Could not consume arg:', arg)
def _CallAndUpdateTrace(component, args, component_trace, treatment='class',
                        target=None):
  """Call the component by consuming args from args, and update the FireTrace.

  The component could be a class, a routine, or a callable object. This function
  calls the component and adds the appropriate action to component_trace.

  Args:
    component: The component to call.
    args: Args for calling the component.
    component_trace: FireTrace object that contains action trace.
    treatment: Type of treatment used. Indicating whether we treat the component
      as a class, a routine, or a callable. One of 'class', 'routine',
      'callable'.
    target: Target in FireTrace element, default is None. If the value is None,
      the component itself will be used as target.
  Returns:
    component: The object that is the result of the callable call.
    remaining_args: The remaining args that haven't been consumed yet.
  """
  if not target:
    target = component
  filename, lineno = inspectutils.GetFileAndLine(component)
  metadata = decorators.GetMetadata(component)
  # For callable objects, the relevant signature is that of __call__, not the
  # object itself.
  fn = component.__call__ if treatment == 'callable' else component
  parse = _MakeParseFn(fn, metadata)
  (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)

  # Call the function.
  if inspectutils.IsCoroutineFunction(fn):
    try:
      loop = asyncio.get_running_loop()
    except RuntimeError:
      # No event loop running, create a new one
      component = asyncio.run(fn(*varargs, **kwargs))
    else:
      # Event loop is already running
      # NOTE(review): run_until_complete on an already-running loop raises
      # RuntimeError in standard asyncio — confirm this branch is only reached
      # under a loop implementation that permits re-entrancy (e.g. patched by
      # nest_asyncio).
      component = loop.run_until_complete(fn(*varargs, **kwargs))
  else:
    component = fn(*varargs, **kwargs)

  # Record how the component was invoked so the trace can report it.
  if treatment == 'class':
    action = trace.INSTANTIATED_CLASS
  elif treatment == 'routine':
    action = trace.CALLED_ROUTINE
  else:
    action = trace.CALLED_CALLABLE
  component_trace.AddCalledComponent(
      component, target, consumed_args, filename, lineno, capacity,
      action=action)

  return component, remaining_args
def _MakeParseFn(fn, metadata):
  """Creates a parse function for fn.

  Args:
    fn: The function or class to create the parse function for.
    metadata: Additional metadata about the component the parse function is
      for.
  Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns (varargs, kwargs), remaining_args. The original function fn
    can then be called with fn(*varargs, **kwargs). The remaining_args are
    the leftover args from the arguments to the parse function.
  """
  fn_spec = inspectutils.GetFullArgSpec(fn)
  # Positional arguments without default values are all required.
  required_positional_count = len(fn_spec.args) - len(fn_spec.defaults)
  mandatory_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)

  def _ParseFn(command_args):
    """Parses `command_args` into (varargs, kwargs), remaining_args."""
    kwargs, leftover_kwargs, leftover_args = _ParseKeywordArgs(
        command_args, fn_spec)
    # Note: _ParseArgs modifies kwargs.
    positional, kwargs, leftover_args, capacity = _ParseArgs(
        fn_spec.args, fn_spec.defaults, required_positional_count, kwargs,
        leftover_args, metadata)

    if fn_spec.varargs or fn_spec.varkw:
      # With *varargs or **kwargs accepted, there is always capacity.
      capacity = True

    unexpected_kw = set(kwargs) - set(fn_spec.kwonlyargs)
    if fn_spec.varkw is None and unexpected_kw:
      raise FireError('Unexpected kwargs present:', unexpected_kw)

    missing_kwonly = mandatory_kwonly - set(kwargs)
    if missing_kwonly:
      raise FireError('Missing required flags:', missing_kwonly)

    # When *varargs is accepted, every remaining argument feeds it.
    if fn_spec.varargs is None:
      extra_positional = []
    else:
      extra_positional, leftover_args = leftover_args, []
    extra_positional = [
        _ParseValue(item, None, None, metadata) for item in extra_positional]

    all_positional = positional + extra_positional
    leftover_args += leftover_kwargs
    consumed = command_args[:len(command_args) - len(leftover_args)]
    return (all_positional, kwargs), consumed, leftover_args, capacity

  return _ParseFn
def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
               remaining_args, metadata):
  """Parses the positional and named arguments from the available supplied args.

  Modifies kwargs, removing args as they are used.

  Args:
    fn_args: A list of argument names that the target function accepts,
      including positional and named arguments, but not the varargs or kwargs
      names.
    fn_defaults: A list of the default values in the function argspec.
    num_required_args: The number of required arguments from the function's
      argspec. This is the number of arguments without a default value.
    kwargs: Dict with named command line arguments and their values.
    remaining_args: The remaining command line arguments, which may still be
      used as positional arguments.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    parsed_args: A list of values to be used as positional arguments for
      calling the target function.
    kwargs: The input dict kwargs modified with the used kwargs removed.
    remaining_args: A list of the supplied args that have not been used yet.
    capacity: Whether the call could have taken args in place of defaults.
  Raises:
    FireError: If additional positional arguments are expected, but none are
      available.
  """
  accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)
  capacity = False  # Flipped to True the first time a default value is used.
  parsed_args = []

  for position, name in enumerate(fn_args):
    named_value = kwargs.pop(name, None)
    if named_value is not None:
      # The argument was supplied by name on the command line.
      parsed_args.append(_ParseValue(named_value, position, name, metadata))
      continue

    if remaining_args and accepts_positional_args:
      # Consume the next unused command line token as a positional value.
      parsed_args.append(
          _ParseValue(remaining_args.pop(0), position, name, metadata))
    elif position < num_required_args:
      # No value is available for an argument that has no default.
      raise FireError(
          'The function received no value for the required argument:', name)
    else:
      # Past the required arguments: fall back to the function's default.
      capacity = True
      parsed_args.append(fn_defaults[position - num_required_args])

  # Parse the values of any named arguments that remain in kwargs.
  for key in kwargs:
    kwargs[key] = _ParseValue(kwargs[key], None, key, metadata)

  return parsed_args, kwargs, remaining_args, capacity
def _ParseKeywordArgs(args, fn_spec):
  """Parses the supplied arguments for keyword arguments.

  Given a list of arguments, finds occurrences of --name value, and uses 'name'
  as the keyword and 'value' as the value. Constructs and returns a dictionary
  of these keyword arguments, and returns a list of the remaining arguments.

  When the function does not accept **kwargs (fn_spec.varkw is None), only
  argument names actually accepted by the function (fn_spec.args plus
  fn_spec.kwonlyargs) are consumed as keywords; unrecognized flags are
  returned in remaining_kwargs.

  This returns the values of the args as strings. They are later processed by
  _ParseArgs, which converts them to the appropriate type.

  Args:
    args: A list of arguments.
    fn_spec: The inspectutils.FullArgSpec describing the given callable.
  Returns:
    kwargs: A dictionary mapping keywords to values (values are unparsed
      strings at this stage).
    remaining_kwargs: A list of the unused kwargs from the original args.
    remaining_args: A list of the unused arguments from the original args.
  Raises:
    FireError: If a single-character flag is passed that could refer to
      multiple possible args.
  """
  kwargs = {}
  remaining_kwargs = []
  remaining_args = []
  fn_keywords = fn_spec.varkw
  fn_args = fn_spec.args + fn_spec.kwonlyargs

  if not args:
    return kwargs, remaining_kwargs, remaining_args

  # True when the next token has already been consumed as the value of the
  # previous flag (the '--name value' form).
  skip_argument = False

  for index, argument in enumerate(args):
    if skip_argument:
      skip_argument = False
      continue

    if _IsFlag(argument):
      # This is a named argument. We get its value from this arg or the next.

      # Terminology:
      # argument: A full token from the command line, e.g. '--alpha=10'
      # stripped_argument: An argument without leading hyphens.
      # key: The contents of the stripped argument up to the first equal sign.
      # "shortcut flag": refers to an argument where the key is just the first
      #   letter of a longer keyword.
      # keyword: The Python function argument being set by this argument.
      # value: The unparsed value for that Python function argument.
      contains_equals = '=' in argument
      stripped_argument = argument.lstrip('-')
      if contains_equals:
        key, value = stripped_argument.split('=', 1)
      else:
        key = stripped_argument
        value = None  # value will be set later on.
      key = key.replace('-', '_')
      # Boolean syntax: a bare flag with no '=' that is either the last token
      # or followed by another flag, e.g. '--verbose' or '--verbose --other'.
      is_bool_syntax = (not contains_equals and
                        (index + 1 == len(args) or _IsFlag(args[index + 1])))

      # Determine the keyword.
      keyword = ''  # Indicates no valid keyword has been found yet.
      if (key in fn_args
          or (is_bool_syntax and key.startswith('no') and key[2:] in fn_args)
          or fn_keywords):
        keyword = key
      elif len(key) == 1:
        # This may be a shortcut flag.
        matching_fn_args = [arg for arg in fn_args if arg[0] == key]
        if len(matching_fn_args) == 1:
          keyword = matching_fn_args[0]
        elif len(matching_fn_args) > 1:
          raise FireError(
              f"The argument '{argument}' is ambiguous as it could "
              f"refer to any of the following arguments: {matching_fn_args}"
          )

      # Determine the value.
      if not keyword:
        got_argument = False
      elif contains_equals:
        # Already got the value above.
        got_argument = True
      elif is_bool_syntax:
        # There's no next arg or the next arg is a Flag, so we consider this
        # flag to be a boolean. '--noname' negates the boolean flag 'name'.
        got_argument = True
        if keyword in fn_args:
          value = 'True'
        elif keyword.startswith('no'):
          keyword = keyword[2:]
          value = 'False'
        else:
          value = 'True'
      else:
        # The assert should pass. Otherwise either contains_equals or
        # is_bool_syntax would have been True.
        assert index + 1 < len(args)
        value = args[index + 1]
        got_argument = True

      # In order for us to consume the argument as a keyword arg, we either:
      # Need to be explicitly expecting the keyword, or we need to be
      # accepting **kwargs.
      skip_argument = not contains_equals and not is_bool_syntax

      if got_argument:
        kwargs[keyword] = value
      else:
        # Not a recognized keyword; preserve the flag (and its value token,
        # if any) in remaining_kwargs.
        remaining_kwargs.append(argument)
        if skip_argument:
          remaining_kwargs.append(args[index + 1])

    else:  # not _IsFlag(argument)
      remaining_args.append(argument)

  return kwargs, remaining_kwargs, remaining_args
def _IsFlag(argument):
  """Determines if the argument is a flag argument.

  If it starts with a hyphen and isn't a negative number, it's a flag.

  Args:
    argument: A command line argument that may or may not be a flag.
  Returns:
    A truthy value if the argument is a flag, falsy otherwise.
  """
  single_char = _IsSingleCharFlag(argument)
  return single_char or _IsMultiCharFlag(argument)
def _IsSingleCharFlag(argument):
"""Determines if the argument is a single char flag (e.g. '-a')."""
return re.match('^-[a-zA-Z]$', argument) or re.match('^-[a-zA-Z]=', argument)
def _IsMultiCharFlag(argument):
"""Determines if the argument is a multi char flag (e.g. '--alpha')."""
return argument.startswith('--') or re.match('^-[a-zA-Z]', argument)
def _ParseValue(value, index, arg, metadata):
  """Parses value, a string, into the appropriate type.

  The function used to parse value is determined by the remaining arguments.

  Args:
    value: The string value to be parsed, typically a command line argument.
    index: The index of the value in the function's argspec.
    arg: The name of the argument the value is being parsed for.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    value, parsed into the appropriate type for calling a function.
  """
  parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)
  # Fall back to the generic value parser unless a decorator-supplied parse
  # function applies to this argument.
  chosen_fn = parser.DefaultParseValue
  if parse_fns:
    default_fn = parse_fns['default']
    positional_fns = parse_fns['positional']
    named_fns = parse_fns['named']
    is_positional = index is not None and 0 <= index < len(positional_fns)
    if is_positional:
      chosen_fn = positional_fns[index]
    elif arg in named_fns:
      chosen_fn = named_fns[arg]
    elif default_fn is not None:
      chosen_fn = default_fn
  return chosen_fn(value)
| FireExit |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 1726,
"end": 2574
} | class ____(object):
"""*
jina gRPC service for DataRequests.
"""
@staticmethod
def process_data(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/jina.JinaDataRequestRPC/process_data',
jina__pb2.DataRequestListProto.SerializeToString,
jina__pb2.DataRequestProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| JinaDataRequestRPC |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 241993,
"end": 243568
} | class ____(Operation):
def __init__(self, kth, axis=-1, *, name=None):
super().__init__(name=name)
if not isinstance(kth, int):
raise ValueError(f"kth must be an integer. Received:kth = {kth}")
self.kth = kth
self.axis = axis
def call(self, x):
return backend.numpy.argpartition(x, kth=self.kth, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="int32")
@keras_export(["keras.ops.argpartition", "keras.ops.numpy.argpartition"])
def argpartition(x, kth, axis=-1):
"""Performs an indirect partition along the given axis.
It returns an array
of indices of the same shape as `x` that index data along the given axis
in partitioned order.
Args:
a: Array to sort.
kth: Element index to partition by.
The k-th element will be in its final sorted position and all
smaller elements will be moved before it and all larger elements
behind it. The order of all elements in the partitions is undefined.
If provided with a sequence of k-th it will partition all of them
into their sorted position at once.
axis: Axis along which to sort. The default is -1 (the last axis).
If `None`, the flattened array is used.
Returns:
Array of indices that partition `x` along the specified `axis`.
"""
if any_symbolic_tensors((x,)):
return Argpartition(kth, axis).symbolic_call(x)
return backend.numpy.argpartition(x, kth, axis)
| Argpartition |
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_tasks.py | {
"start": 14882,
"end": 16777
} | class ____(UptimeTestCase):
def test(self) -> None:
url = make_unique_test_url()
assert not is_url_auto_monitored_for_project(self.project, url)
detector = monitor_url_for_project(self.project, url)
assert is_url_auto_monitored_for_project(self.project, url)
assert detector.name == f"Uptime Monitoring for {url}"
def test_existing(self) -> None:
url = make_unique_test_url()
with self.tasks():
detector_1 = monitor_url_for_project(self.project, url)
assert is_url_auto_monitored_for_project(self.project, url)
assert detector_1.name == f"Uptime Monitoring for {url}"
url_2 = make_unique_test_url()
with self.tasks():
detector_2 = monitor_url_for_project(self.project, url_2)
# Execute scheduled deletions to ensure the first detector is cleaned
# up when re-detecting
with self.tasks():
run_scheduled_deletions()
assert not is_url_auto_monitored_for_project(self.project, url)
assert is_url_auto_monitored_for_project(self.project, url_2)
assert detector_2.name == f"Uptime Monitoring for {url_2}"
def test_manual_existing(self) -> None:
manual_url = make_unique_test_url()
self.create_uptime_detector(
uptime_subscription=self.create_uptime_subscription(url=manual_url),
mode=UptimeMonitorMode.MANUAL,
)
url = make_unique_test_url()
monitor_url_for_project(self.project, url)
assert is_url_auto_monitored_for_project(self.project, url)
detectors = Detector.objects.filter(
project=self.project,
config__mode=UptimeMonitorMode.MANUAL.value,
)
assert detectors.exists()
assert any(get_uptime_subscription(detector).url == manual_url for detector in detectors)
| TestMonitorUrlForProject |
python | astropy__astropy | astropy/table/index.py | {
"start": 37865,
"end": 44535
} | class ____:
"""
Pseudo-list of Table rows allowing for retrieval of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
index_id : tuple or None
If not None, the index id as a tuple to use for all retrievals. If None
(default), the primary key index is used.
"""
def __init__(self, table: Table, index_id: tuple[str, ...] | None = None):
self.table = table
self.indices = table.indices
self.index_id = index_id
def __repr__(self):
index_id = self.index_id
if index_id is None:
index_id = self.table.primary_key
id_repr = index_id[0] if len(index_id) == 1 else index_id
return (
f"<{self.__class__.__name__} index_id={repr(id_repr)} "
f"id(table)={id(self.table)}>"
)
def with_index(self, *index_id):
"""Return a new instance of this class for ``index_id``
Parameters
----------
index_id : str, tuple[str, ...], or list[str]
Identifier of the index to use
Examples
--------
>>> from astropy.table import QTable
>>> t = QTable({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> t.add_index('a')
>>> t.add_index(['b', 'c'])
>>> t.loc.with_index('a')[2] # doctest: +IGNORE_OUTPUT
>>> t.loc.with_index('b', 'c')[5, 8] # doctest: +IGNORE_OUTPUT
>>> t.loc.with_index(['b', 'c'])[5, 8] # doctest: +IGNORE_OUTPUT
"""
if len(index_id) == 1 and isinstance(index_id[0], (tuple, list)):
index_id = tuple(index_id[0])
return self.__class__(self.table, index_id)
def _get_index_id_and_item(self, item):
index_id = self.index_id
if self.index_id is None:
if isinstance(item, tuple):
index_id, item = interpret_item_as_index_id_and_item(item)
else:
index_id = self.table.primary_key
return index_id, item
def _get_row_idxs_as_list(
self,
index_id: tuple,
item,
item_is_sequence: bool,
) -> list[int]:
"""
Retrieve Table row indices for ``item`` as a list of integers.
Parameters
----------
index_id : tuple of str
Identifier of the index to use.
item : column element, list, ndarray, or slice
Can be a value in the table index, a list/ndarray of such values, or a value
slice (both endpoints are included).
item_is_sequence : bool
Whether ``item`` is a sequence (list or ndarray with ndim > 0) of values.
Returns
-------
list of int
List of row indices corresponding to the input item.
Raises
------
ValueError
If the table has no indices.
KeyError
If no matches are found for a given key.
"""
if len(self.indices) == 0:
raise ValueError("Can only use TableLoc for a table with indices")
index = self.indices[index_id]
if isinstance(item, slice):
start = None if item.start is None else (item.start,)
stop = None if item.stop is None else (item.stop,)
rows = index.range(start, stop)
else:
if not item_is_sequence: # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for value in item:
ii = index.find(value if isinstance(value, tuple) else (value,))
if len(ii) == 0:
raise KeyError(f"No matches found for key {value}")
else:
rows.extend(ii)
return rows
def _get_row_idxs_as_list_or_int(self, item) -> list[int] | int:
"""Internal function to retrieve row indices for ``item`` as a list or int.
See ``__getitem__`` for details on the input item.
"""
# This handles ``tbl.loc[<item>]`` and ``tbl.loc[<key>, <item>]`` (and the same
# for ``tbl.loc_indices``).
index_id, item = self._get_index_id_and_item(item)
item_is_sequence = (
isinstance(item, np.ndarray) and item.ndim > 0
) or isinstance(item, list)
# Short-circuit for case like tbl.loc[[]], returns tbl[[]]
if item_is_sequence and len(item) == 0:
return []
row_idxs = self._get_row_idxs_as_list(index_id, item, item_is_sequence)
# If ``item`` is a sequence of keys or a slice then always returns a list of
# rows, where zero rows is OK. Otherwise check output and possibly return a
# scalar.
if not (item_is_sequence or isinstance(item, slice)) and len(row_idxs) == 1:
row_idxs = row_idxs[0]
return row_idxs
def __getitem__(self, item) -> Table | Table.Row:
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
Returns
-------
Table | Row
A table slice or Row corresponding to the input item.
"""
rows = self._get_row_idxs_as_list_or_int(item)
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table row's by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_row_idxs_as_list_or_int(key)
if hasattr(rows, "__len__"):
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError(f"Right side should contain {len(rows)} values")
else:
self.table[rows] = value
| TableLoc |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 32118,
"end": 33003
} | class ____(nn.Module):
"""ESM Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x) + self.bias
return x
@auto_docstring(
custom_intro="""
ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
"""
)
| EsmLMHead |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_reflection.py | {
"start": 9593,
"end": 12510
} | class ____(fixtures.TestBase):
__sparse_driver_backend__ = True
def column_names():
return testing.combinations(
("plainname",),
("(3)",),
("col%p",),
("[brack]",),
argnames="columnname",
)
def table_names():
return testing.combinations(
("plain",),
("(2)",),
("per % cent",),
("[brackets]",),
argnames="tablename",
)
@testing.variation("use_composite", [True, False])
@column_names()
@table_names()
@testing.requires.foreign_key_constraint_reflection
def test_fk_ref(
self, connection, metadata, use_composite, tablename, columnname
):
"""tests for #10275"""
tt = Table(
tablename,
metadata,
Column(columnname, Integer, key="id", primary_key=True),
test_needs_fk=True,
)
if use_composite:
tt.append_column(Column("id2", Integer, primary_key=True))
if use_composite:
Table(
"other",
metadata,
Column("id", Integer, primary_key=True),
Column("ref", Integer),
Column("ref2", Integer),
sa.ForeignKeyConstraint(["ref", "ref2"], [tt.c.id, tt.c.id2]),
test_needs_fk=True,
)
else:
Table(
"other",
metadata,
Column("id", Integer, primary_key=True),
Column("ref", ForeignKey(tt.c.id)),
test_needs_fk=True,
)
metadata.create_all(connection)
m2 = MetaData()
o2 = Table("other", m2, autoload_with=connection)
t1 = m2.tables[tablename]
assert o2.c.ref.references(t1.c[0])
if use_composite:
assert o2.c.ref2.references(t1.c[1])
@column_names()
@table_names()
@testing.requires.identity_columns
def test_reflect_identity(
self, tablename, columnname, connection, metadata
):
Table(
tablename,
metadata,
Column(columnname, Integer, Identity(), primary_key=True),
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(insp.get_columns(tablename)[0]["identity"]["start"], 1)
@column_names()
@table_names()
@testing.requires.comment_reflection
def test_reflect_comments(
self, tablename, columnname, connection, metadata
):
Table(
tablename,
metadata,
Column("id", Integer, primary_key=True),
Column(columnname, Integer, comment="some comment"),
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(insp.get_columns(tablename)[1]["comment"], "some comment")
| BizarroCharacterTest |
python | getsentry__sentry | src/sentry/api/serializers/models/role.py | {
"start": 2251,
"end": 2791
} | class ____(Serializer):
def __init__(self, **kwargs):
"""
Remove this when deleting "organizations:team-roles" flag
"""
self.organization = kwargs["organization"]
def serialize(self, obj: TeamRole, attrs, user, **kwargs) -> TeamRoleSerializerResponse:
base = _serialize_base_role(
obj, self.organization, allowed_roles=kwargs.get("allowed_roles", ())
)
return {
**base,
"isMinimumRoleFor": obj.is_minimum_role_for,
}
| TeamRoleSerializer |
python | astropy__astropy | astropy/io/votable/tests/test_vo.py | {
"start": 7086,
"end": 13166
} | class ____:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == "boolean"
assert fieldref.get_ref().datatype == "boolean"
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == "INPUT"
assert paramref.get_ref().datatype == "float"
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
@pytest.mark.parametrize(
"columns, expected_missing",
[
# a single non-existent column
pytest.param(["c1"], ["c1"], id="basic"),
# multiple missing columns (checking that order is preserved)
pytest.param(["c1", "c2", "c3"], ["c1", "c2", "c3"], id="check-ordering"),
# mixing existing with missing columns
pytest.param(["c1", "string_test", "c2"], ["c1", "c2"], id="list-only-missing"),
],
)
def test_select_missing_columns_error_message(columns, expected_missing):
# see https://github.com/astropy/astropy/pull/15956
filename = get_pkg_data_filename("data/regression.xml")
with pytest.raises(
ValueError,
match=re.escape(f"Columns {expected_missing!r} were not found in fields list"),
):
parse_single_table(filename, columns=columns)
def test_select_columns_by_index():
columns = [0, 5, 14]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
columns = ["string_test", "unsignedByte", "bitarray"]
for c in columns:
assert not np.all(mask[c])
# deselected columns shouldn't be present in the output
assert "unicode_test" not in array.dtype.fields
assert "unicode_test" not in mask.dtype.fields
def test_select_columns_by_name():
columns = ["string_test", "unsignedByte", "bitarray"]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
# deselected columns shouldn't be present in the output
assert "unicode_test" not in array.dtype.fields
assert "unicode_test" not in mask.dtype.fields
@pytest.mark.parametrize(
"column_ids, use_names_over_ids, expected_names",
[
# just the first column
pytest.param(
["string_test"],
False,
["string_test"],
id="first-col-ids",
),
pytest.param(
["string_test"],
True,
["string test"],
id="first-col-names",
),
# a single column, other than the first
pytest.param(
["unicode_test"],
False,
["unicode_test"],
id="single-col-ids",
),
pytest.param(
["unicode_test"],
True,
["unicode_test"],
id="single-col-names",
),
# two non-consecutive, differently named columns
pytest.param(
["string_test", "unicode_test"],
False,
["string_test", "unicode_test"],
id="two-cols-ids",
),
pytest.param(
["string_test", "unicode_test"],
True,
["string test", "unicode_test"],
id="two-cols-names",
),
# just the first two columns (that have the same ID)
pytest.param(
["string_test", "string_test_2"],
False,
["string_test", "string_test_2"],
id="two-cols-ids-sameID",
),
pytest.param(
["string_test", "string_test_2"],
True,
["string test", "fixed string test"],
id="two-cols-names-sameID",
),
# columns should be returned in the order they are found, which
# in the general case isn't the order they are requested
pytest.param(
["unicode_test", "string_test"],
False,
["string_test", "unicode_test"],
id="two-cols-ids-order-mismatch",
),
pytest.param(
["unicode_test", "string_test"],
True,
["string test", "unicode_test"],
id="two-cols-names-order-mismatch",
),
],
)
def test_select_columns_by_name_edge_cases(
column_ids, use_names_over_ids, expected_names
):
# see https://github.com/astropy/astropy/issues/14943
filename = get_pkg_data_filename("data/regression.xml")
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
vot1 = parse_single_table(filename, columns=column_ids)
t1 = vot1.to_table(use_names_over_ids=use_names_over_ids)
assert t1.colnames == expected_names
| TestReferences |
python | pytorch__pytorch | test/dynamo/test_misc.py | {
"start": 432800,
"end": 433563
} | class ____(JitTestCase):
def test_jit_save(self):
def fn():
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 3
@torch.jit.export
def __getstate__(self):
return (3, self.training)
@torch.jit.export
def __setstate__(self, state):
self.a = state[0]
self.training = state[1]
def forward(self, x):
return x + self.a
f = Foo()
return torch.jit.trace(f, (torch.rand(3, 4),))
fn()
opt_fn = torch.compile(fn, backend="eager")
opt_fn()
| TestTracer |
python | pallets__flask | src/flask/json/tag.py | {
"start": 4770,
"end": 5308
} | class ____(JSONTag):
"""Serialize anything matching the :class:`~markupsafe.Markup` API by
having a ``__html__`` method to the result of that method. Always
deserializes to an instance of :class:`~markupsafe.Markup`."""
__slots__ = ()
key = " m"
def check(self, value: t.Any) -> bool:
return callable(getattr(value, "__html__", None))
def to_json(self, value: t.Any) -> t.Any:
return str(value.__html__())
def to_python(self, value: t.Any) -> t.Any:
return Markup(value)
| TagMarkup |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_dtypes.py | {
"start": 238,
"end": 4672
} | class ____:
def test_empty_frame_dtypes(self):
empty_df = DataFrame()
tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
nocols_df = DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
norows_df = DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc")))
norows_int_df = DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
)
df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3])
ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64})
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3, unit="ns"),
"B": date_range("20130101", periods=3, tz="US/Eastern", unit="ns"),
"C": date_range("20130101", periods=3, tz="CET", unit="ns"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float64)
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float64, "b": np.float64, "c": np.float64}),
)
tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float64}))
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float64, "b": np.float64, "c": np.float64}),
)
@pytest.mark.parametrize(
"data",
[pd.NA, True],
)
def test_dtypes_are_correct_after_groupby_last(self, data):
# GH46409
df = DataFrame(
{"id": [1, 2, 3, 4], "test": [True, pd.NA, data, False]}
).convert_dtypes()
result = df.groupby("id").last().test
expected = df.set_index("id").test
assert result.dtype == pd.BooleanDtype()
tm.assert_series_equal(expected, result)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
def test_dtypes_timedeltas(self):
df = DataFrame(
{
"A": Series(date_range("2012-1-1", periods=3, freq="D", unit="ns")),
"B": Series([timedelta(days=i) for i in range(3)]),
}
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_frame_apply_np_array_return_type(self, using_infer_string):
# GH 35517
df = DataFrame([["foo"]])
result = df.apply(lambda col: np.array("bar"))
expected = Series(np.array("bar"))
tm.assert_series_equal(result, expected)
| TestDataFrameDataTypes |
python | wandb__wandb | wandb/vendor/pygments/lexers/ezhil.py | {
"start": 455,
"end": 2520
} | class ____(RegexLexer):
"""
Lexer for `Ezhil, a Tamil script-based programming language <http://ezhillang.org>`_
.. versionadded:: 2.1
"""
name = 'Ezhil'
aliases = ['ezhil']
filenames = ['*.n']
mimetypes = ['text/x-ezhil']
flags = re.MULTILINE | re.UNICODE
# Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this.
# This much simpler version is close enough, and includes combining marks.
_TALETTERS = u'[a-zA-Z_]|[\u0b80-\u0bff]'
tokens = {
'root': [
include('keywords'),
(r'#.*\n', Comment.Single),
(r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator),
(u'இல்', Operator.Word),
(words((u'assert', u'max', u'min',
u'நீளம்', u'சரம்_இடமாற்று', u'சரம்_கண்டுபிடி',
u'பட்டியல்', u'பின்இணை', u'வரிசைப்படுத்து',
u'எடு', u'தலைகீழ்', u'நீட்டிக்க', u'நுழைக்க', u'வை',
u'கோப்பை_திற', u'கோப்பை_எழுது', u'கோப்பை_மூடு',
u'pi', u'sin', u'cos', u'tan', u'sqrt', u'hypot', u'pow',
u'exp', u'log', u'log10', u'exit',
), suffix=r'\b'), Name.Builtin),
(r'(True|False)\b', Keyword.Constant),
(r'[^\S\n]+', Text),
include('identifier'),
include('literal'),
(r'[(){}\[\]:;.]', Punctuation),
],
'keywords': [
(u'பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து|நிரல்பாகம்', Keyword),
],
'identifier': [
(u'(?:'+_TALETTERS+u')(?:[0-9]|'+_TALETTERS+u')*', Name),
],
'literal': [
(r'".*?"', String),
(r'(?u)\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float),
(r'(?u)\d+', Number.Integer),
]
}
def __init__(self, **options):
super(EzhilLexer, self).__init__(**options)
self.encoding = options.get('encoding', 'utf-8')
| EzhilLexer |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/executor/child_process_executor.py | {
"start": 1643,
"end": 5897
} | class ____(Exception):
"""Thrown when the child process crashes."""
def __init__(self, pid, exit_code=None):
self.pid = pid
self.exit_code = exit_code
super().__init__()
def _execute_command_in_child_process(event_queue: Queue, command: ChildProcessCommand):
"""Wraps the execution of a ChildProcessCommand.
Handles errors and communicates across a queue with the parent process.
"""
check.inst_param(command, "command", ChildProcessCommand)
with capture_interrupts():
pid = os.getpid()
event_queue.put(ChildProcessStartEvent(pid=pid))
try:
for step_event in command.execute():
event_queue.put(step_event)
event_queue.put(ChildProcessDoneEvent(pid=pid))
except (
Exception,
KeyboardInterrupt,
DagsterExecutionInterruptedError,
):
event_queue.put(
ChildProcessSystemErrorEvent(
pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
TICK = 20.0 * 1.0 / 1000.0
"""The minimum interval at which to check for child process liveness -- default 20ms."""
PROCESS_DEAD_AND_QUEUE_EMPTY = "PROCESS_DEAD_AND_QUEUE_EMPTY"
"""Sentinel value."""
def _poll_for_event(
process, event_queue
) -> Optional[Union["DagsterEvent", Literal["PROCESS_DEAD_AND_QUEUE_EMPTY"]]]:
try:
return event_queue.get(block=True, timeout=TICK)
except queue.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return event_queue.get(block=False)
except queue.Empty:
# If the queue empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
return None
def execute_child_process_command(
multiprocessing_ctx: MultiprocessingBaseContext, command: ChildProcessCommand
) -> Iterator[Optional[Union["DagsterEvent", ChildProcessEvent, BaseProcess]]]:
"""Execute a ChildProcessCommand in a new process.
This function starts a new process whose execution target is a ChildProcessCommand wrapped by
_execute_command_in_child_process; polls the queue for events yielded by the child process
until the process dies and the queue is empty.
This function yields a complex set of objects to enable having multiple child process
executions in flight:
* None - nothing has happened, yielded to enable cooperative multitasking other iterators
* multiprocessing.BaseProcess - the child process object.
* ChildProcessEvent - Family of objects that communicates state changes in the child process
* The actual values yielded by the child process command
Args:
multiprocessing_ctx: The multiprocessing context to execute in (spawn, forkserver, fork)
command (ChildProcessCommand): The command to execute in the child process.
Warning: if the child process is in an infinite loop, this will
also infinitely loop.
"""
check.inst_param(command, "command", ChildProcessCommand)
event_queue = multiprocessing_ctx.Queue()
try:
process = multiprocessing_ctx.Process( # type: ignore
target=_execute_command_in_child_process, args=(event_queue, command)
)
process.start()
yield process
completed_properly = False
while not completed_properly:
event = _poll_for_event(process, event_queue)
if event == PROCESS_DEAD_AND_QUEUE_EMPTY:
break
yield event
if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)):
completed_properly = True
if not completed_properly:
# TODO Figure out what to do about stderr/stdout
raise ChildProcessCrashException(pid=process.pid, exit_code=process.exitcode)
process.join()
finally:
event_queue.close()
| ChildProcessCrashException |
python | python__mypy | mypy/state.py | {
"start": 234,
"end": 850
} | class ____:
# Wrap this in a class since it's faster that using a module-level attribute.
def __init__(self, strict_optional: bool) -> None:
# Value varies by file being processed
self.strict_optional = strict_optional
@contextmanager
def strict_optional_set(self, value: bool) -> Iterator[None]:
saved = self.strict_optional
self.strict_optional = value
try:
yield
finally:
self.strict_optional = saved
state: Final = StrictOptionalState(strict_optional=True)
find_occurrences: tuple[str, str] | None = None
| StrictOptionalState |
python | huggingface__transformers | src/transformers/models/got_ocr2/modeling_got_ocr2.py | {
"start": 17734,
"end": 19738
} | class ____(GotOcr2PreTrainedModel):
_can_record_outputs = {"hidden_states": GotOcr2VisionLayer, "attentions": GotOcr2VisionAttention}
input_modalities = ("image",)
def __init__(self, config: GotOcr2VisionConfig):
super().__init__(config)
self.config = config
self.image_size = config.image_size
self.patch_embed = GotOcr2PatchEmbeddings(config)
self.pos_embed = None
if config.use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(
1,
config.image_size // config.patch_size,
config.image_size // config.patch_size,
config.hidden_size,
)
)
self.layers = nn.ModuleList()
for i in range(config.num_hidden_layers):
layer = GotOcr2VisionLayer(
config,
window_size=config.window_size if i not in config.global_attn_indexes else 0,
)
self.layers.append(layer)
self.neck = GotOcr2VisionNeck(config)
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.patch_embed
@check_model_inputs(tie_last_hidden_states=False)
def forward(
self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs]
) -> GotOcr2VisionEncoderOutput:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
if self.pos_embed is not None:
hidden_states = hidden_states + self.pos_embed
for layer_module in self.layers:
hidden_states = layer_module(hidden_states)
hidden_states = self.neck(hidden_states)
return GotOcr2VisionEncoderOutput(
last_hidden_state=hidden_states,
)
| GotOcr2VisionEncoder |
python | anthropics__anthropic-sdk-python | src/anthropic/_exceptions.py | {
"start": 3751,
"end": 3883
} | class ____(APIStatusError):
status_code: Literal[529] = 529 # pyright: ignore[reportIncompatibleVariableOverride]
| OverloadedError |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 9522,
"end": 10887
} | class ____(metaclass=abc.ABCMeta):
""" Abstract base class for Formatters.
A formatter is a callable class that is responsible for computing the
raw format data for a particular format type (MIME type). For example,
an HTML formatter would have a format type of `text/html` and would return
the HTML representation of the object when called.
"""
# The format type of the data returned, usually a MIME type.
format_type = 'text/plain'
# Is the formatter enabled...
enabled = True
@abc.abstractmethod
def __call__(self, obj):
"""Return a JSON'able representation of the object.
If the object cannot be formatted by this formatter,
warn and return None.
"""
return repr(obj)
def _mod_name_key(typ):
"""Return a (__module__, __name__) tuple for a type.
Used as key in Formatter.deferred_printers.
"""
module = getattr(typ, '__module__', None)
name = getattr(typ, '__name__', None)
return (module, name)
def _get_type(obj):
"""Return the type of an instance (old and new-style)"""
return getattr(obj, '__class__', None) or type(obj)
_raise_key_error = Sentinel(
"_raise_key_error",
__name__,
"""
Special value to raise a KeyError
Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
""",
)
| FormatterABC |
python | ray-project__ray | release/long_running_tests/workloads/many_drivers.py | {
"start": 795,
"end": 3037
} | class ____(object):
def method(self):
return 1
for _ in range(5):
for node in nodes:
assert ray.get(
f.options(scheduling_strategy=NodeAffinitySchedulingStrategy(
node, soft=False)).remote()) == 1
actor = Actor.options(scheduling_strategy=NodeAffinitySchedulingStrategy(
node, soft=False)).remote()
assert ray.get(actor.method.remote()) == 1
print("success")
"""
@ray.remote(num_cpus=0)
def run_driver():
output = run_string_as_driver(driver_script, encode="utf-8")
assert "success" in output
iteration = 0
running_ids = [
run_driver.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(node, soft=False)
).remote()
for node in nodes
]
start_time = time.time()
previous_time = start_time
parser = argparse.ArgumentParser(prog="Many Drivers long running tests")
parser.add_argument(
"--iteration-num", type=int, help="How many iterations to run", required=False
)
parser.add_argument(
"--smoke-test",
action="store_true",
help="Whether or not the test is smoke test.",
default=False,
)
args = parser.parse_args()
iteration_num = args.iteration_num
if args.smoke_test:
iteration_num = 400
while True:
if iteration_num is not None and iteration_num < iteration:
break
# Wait for a driver to finish and start a new driver.
[ready_id], running_ids = ray.wait(running_ids, num_returns=1)
ray.get(ready_id)
running_ids.append(
run_driver.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
nodes[iteration % len(nodes)], soft=False
)
).remote()
)
new_time = time.time()
print(
"Iteration {}:\n"
" - Iteration time: {}.\n"
" - Absolute time: {}.\n"
" - Total elapsed time: {}.".format(
iteration, new_time - previous_time, new_time, new_time - start_time
)
)
update_progress(
{
"iteration": iteration,
"iteration_time": new_time - previous_time,
"absolute_time": new_time,
"elapsed_time": new_time - start_time,
}
)
previous_time = new_time
iteration += 1
| Actor |
python | PrefectHQ__prefect | src/prefect/settings/models/server/events.py | {
"start": 241,
"end": 5828
} | class ____(PrefectBaseSettings):
"""
Settings for controlling behavior of the events subsystem
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "events")
)
###########################################################################
# Events settings
stream_out_enabled: bool = Field(
default=True,
description="Whether or not to stream events out to the API via websockets.",
validation_alias=AliasChoices(
AliasPath("stream_out_enabled"),
"prefect_server_events_stream_out_enabled",
"prefect_api_events_stream_out_enabled",
),
)
related_resource_cache_ttl: timedelta = Field(
default=timedelta(minutes=5),
description="The number of seconds to cache related resources for in the API.",
validation_alias=AliasChoices(
AliasPath("related_resource_cache_ttl"),
"prefect_server_events_related_resource_cache_ttl",
"prefect_api_events_related_resource_cache_ttl",
),
)
maximum_labels_per_resource: int = Field(
default=500,
description="The maximum number of labels a resource may have.",
validation_alias=AliasChoices(
AliasPath("maximum_labels_per_resource"),
"prefect_server_events_maximum_labels_per_resource",
"prefect_events_maximum_labels_per_resource",
),
)
maximum_related_resources: int = Field(
default=100,
description="The maximum number of related resources an Event may have.",
validation_alias=AliasChoices(
AliasPath("maximum_related_resources"),
"prefect_server_events_maximum_related_resources",
"prefect_events_maximum_related_resources",
),
)
maximum_size_bytes: int = Field(
default=1_500_000,
description="The maximum size of an Event when serialized to JSON",
validation_alias=AliasChoices(
AliasPath("maximum_size_bytes"),
"prefect_server_events_maximum_size_bytes",
"prefect_events_maximum_size_bytes",
),
)
expired_bucket_buffer: timedelta = Field(
default=timedelta(seconds=60),
description="The amount of time to retain expired automation buckets",
validation_alias=AliasChoices(
AliasPath("expired_bucket_buffer"),
"prefect_server_events_expired_bucket_buffer",
"prefect_events_expired_bucket_buffer",
),
)
proactive_granularity: timedelta = Field(
default=timedelta(seconds=5),
description="How frequently proactive automations are evaluated",
validation_alias=AliasChoices(
AliasPath("proactive_granularity"),
"prefect_server_events_proactive_granularity",
"prefect_events_proactive_granularity",
),
)
retention_period: timedelta = Field(
default=timedelta(days=7),
description="The amount of time to retain events in the database.",
validation_alias=AliasChoices(
AliasPath("retention_period"),
"prefect_server_events_retention_period",
"prefect_events_retention_period",
),
)
maximum_websocket_backfill: timedelta = Field(
default=timedelta(minutes=15),
description="The maximum range to look back for backfilling events for a websocket subscriber.",
validation_alias=AliasChoices(
AliasPath("maximum_websocket_backfill"),
"prefect_server_events_maximum_websocket_backfill",
"prefect_events_maximum_websocket_backfill",
),
)
websocket_backfill_page_size: int = Field(
default=250,
gt=0,
description="The page size for the queries to backfill events for websocket subscribers.",
validation_alias=AliasChoices(
AliasPath("websocket_backfill_page_size"),
"prefect_server_events_websocket_backfill_page_size",
"prefect_events_websocket_backfill_page_size",
),
)
messaging_broker: str = Field(
default="prefect.server.utilities.messaging.memory",
description="Which message broker implementation to use for the messaging system, should point to a module that exports a Publisher and Consumer class.",
validation_alias=AliasChoices(
AliasPath("messaging_broker"),
"prefect_server_events_messaging_broker",
"prefect_messaging_broker",
),
)
messaging_cache: str = Field(
default="prefect.server.utilities.messaging.memory",
description="Which cache implementation to use for the events system. Should point to a module that exports a Cache class.",
validation_alias=AliasChoices(
AliasPath("messaging_cache"),
"prefect_server_events_messaging_cache",
"prefect_messaging_cache",
),
)
causal_ordering: str = Field(
default="prefect.server.events.ordering.memory",
description="Which causal ordering implementation to use for the events system. Should point to a module that exports a CausalOrdering class.",
)
maximum_event_name_length: int = Field(
default=1024,
gt=0,
description="The maximum length of an event name.",
validation_alias=AliasChoices(
AliasPath("maximum_event_name_length"),
"prefect_server_events_maximum_event_name_length",
),
)
| ServerEventsSettings |
python | huggingface__transformers | src/transformers/models/poolformer/configuration_poolformer.py | {
"start": 803,
"end": 5076
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a
PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PoolFormer
[sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of channels in the input image.
patch_size (`int`, *optional*, defaults to 16):
The size of the input patch.
stride (`int`, *optional*, defaults to 16):
The stride of the input patch.
pool_size (`int`, *optional*, defaults to 3):
The size of the pooling window.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
The depth of each encoder block.
hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`):
The hidden sizes of each encoder block.
patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`):
The size of the input patch for each encoder block.
strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`):
The stride of the input patch for each encoder block.
padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`):
The padding of the input patch for each encoder block.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The dropout rate for the dropout layers.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function for the hidden layers.
use_layer_scale (`bool`, *optional*, defaults to `True`):
Whether to use layer scale.
layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
The initial value for the layer scale.
initializer_range (`float`, *optional*, defaults to 0.02):
The initializer range for the weights.
Example:
```python
>>> from transformers import PoolFormerConfig, PoolFormerModel
>>> # Initializing a PoolFormer sail/poolformer_s12 style configuration
>>> configuration = PoolFormerConfig()
>>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration
>>> model = PoolFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "poolformer"
def __init__(
self,
num_channels=3,
patch_size=16,
stride=16,
pool_size=3,
mlp_ratio=4.0,
depths=[2, 2, 6, 2],
hidden_sizes=[64, 128, 320, 512],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
padding=[2, 1, 1, 1],
num_encoder_blocks=4,
drop_path_rate=0.0,
hidden_act="gelu",
use_layer_scale=True,
layer_scale_init_value=1e-5,
initializer_range=0.02,
**kwargs,
):
self.num_channels = num_channels
self.patch_size = patch_size
self.stride = stride
self.padding = padding
self.pool_size = pool_size
self.hidden_sizes = hidden_sizes
self.mlp_ratio = mlp_ratio
self.depths = depths
self.patch_sizes = patch_sizes
self.strides = strides
self.num_encoder_blocks = num_encoder_blocks
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_layer_scale = use_layer_scale
self.layer_scale_init_value = layer_scale_init_value
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["PoolFormerConfig"]
| PoolFormerConfig |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/mirrored_strategy_test.py | {
"start": 19762,
"end": 20430
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2, 1))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = {
device: layout.Mesh(['batch', 'model'], global_ids, local_ids,
test_util.create_device_list((2,), device))
for device in ['TPU', 'GPU', 'CPU']
}
self.mesh_2d = self.configTestMesh(mesh_dict)
def test_invalid_mesh_shape(self):
with self.assertRaisesRegex(
ValueError, 'The mesh for MirroredStrategy must be 1D, received: 2D'):
mirrored_strategy.MirroredStrategy(mesh=self.mesh_2d)
| InvalidMeshTest |
python | getsentry__sentry | src/sentry/models/dashboard_widget.py | {
"start": 7238,
"end": 10788
} | class ____(Model):
"""
Tracks on_demand state and values for dashboard widget queries.
Only a subset of dashboard widget queries have conditions or columns that would
require on-demand extraction, and others are simply not applicable (eg. different dataset).
"""
__relocation_scope__ = RelocationScope.Organization
dashboard_widget_query = FlexibleForeignKey("sentry.DashboardWidgetQuery")
spec_hashes = ArrayField(models.TextField(), default=list)
class OnDemandExtractionState(models.TextChoices):
DISABLED_NOT_APPLICABLE = "disabled:not-applicable", gettext_lazy("disabled:not-applicable")
""" This widget does not have on-demand metrics needing extraction. """
DISABLED_PREROLLOUT = "disabled:pre-rollout", gettext_lazy("disabled:pre-rollout")
""" This represents a pre-filled on-demand value to do load estimates before enabling extraction. """
DISABLED_MANUAL = "disabled:manual", gettext_lazy("disabled:manual")
""" The widget was manually disabled by a user """
DISABLED_SPEC_LIMIT = "disabled:spec-limit", gettext_lazy("disabled:spec-limit")
""" This widget query was disabled during rollout due to the organization reaching it's spec limit. """
DISABLED_HIGH_CARDINALITY = "disabled:high-cardinality", gettext_lazy(
"disabled:high-cardinality"
)
""" This widget query was disabled by the cardinality cron due to one of the columns having high cardinality """
ENABLED_ENROLLED = "enabled:enrolled", gettext_lazy("enabled:enrolled")
""" This widget query was enabled automatically during rollout for automatic support for users migrating from AM1. """
ENABLED_CREATION = "enabled:creation", gettext_lazy("enabled:creation")
""" This widget query was opted into on-demand during creation. """
ENABLED_MANUAL = "enabled:manual", gettext_lazy("enabled:manual")
""" This widget query was enabled manually post creation or otherwise. """
spec_version = models.IntegerField(null=True)
extraction_state = models.CharField(max_length=30, choices=OnDemandExtractionState.choices)
date_modified = models.DateTimeField(default=timezone.now)
date_added = models.DateTimeField(default=timezone.now, db_default=Now())
def can_extraction_be_auto_overridden(self):
"""Determines whether tasks can override extraction state"""
if self.extraction_state == self.OnDemandExtractionState.DISABLED_MANUAL:
# Manually disabling a widget will cause it to stay off until manually re-enabled.
return False
if self.extraction_state == self.OnDemandExtractionState.DISABLED_HIGH_CARDINALITY:
# High cardinality should remain off until manually re-enabled.
return False
if self.extraction_state == self.OnDemandExtractionState.DISABLED_SPEC_LIMIT:
# Spec limits also can only be re-enabled manually.
return False
return True
def extraction_enabled(self):
"""Whether on-demand is enabled or disabled for this widget.
If this is enabled, Relay should be extracting metrics from events matching the associated widget_query upon ingest.
"""
return self.extraction_state.startswith(ON_DEMAND_ENABLED_KEY)
class Meta:
app_label = "sentry"
db_table = "sentry_dashboardwidgetqueryondemand"
__repr__ = sane_repr("extraction_state", "spec_hashes")
@region_silo_model
| DashboardWidgetQueryOnDemand |
python | pypa__warehouse | tests/unit/admin/views/test_organization_applications.py | {
"start": 9874,
"end": 16357
} | class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_detail(self, db_request):
organization_application = OrganizationApplicationFactory.create()
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
result = views.organization_application_detail(db_request)
assert result["user"] == organization_application.submitted_by
assert result["form"].name.data == organization_application.name
assert result["conflicting_applications"] == []
assert result["organization_application"] == organization_application
@pytest.mark.usefixtures("_enable_organizations")
def test_detail_edit(self, db_request):
organization_application = OrganizationApplicationFactory.create()
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
new_org_name = f"New-Org-Name-{organization_application.name}"
db_request.method = "POST"
db_request.POST["name"] = new_org_name
db_request.POST["description"] = organization_application.description
db_request.POST["display_name"] = organization_application.display_name
db_request.POST["link_url"] = organization_application.link_url
db_request.POST["orgtype"] = organization_application.orgtype
db_request.POST = MultiDict(db_request.POST)
db_request.session.flash = pretend.call_recorder(lambda *a, **kw: None)
db_request.current_route_path = lambda *a, **kw: "/the/url/"
result = views.organization_application_detail(db_request)
assert result.status_code == 303
assert result.location == "/the/url/"
assert db_request.session.flash.calls == [
pretend.call(
f"Application for {organization_application.name!r} updated",
queue="success",
)
]
assert organization_application.name == new_org_name
@pytest.mark.usefixtures("_enable_organizations")
def test_detail_edit_invalid(self, db_request):
existing_organization = OrganizationFactory.create()
organization_application = OrganizationApplicationFactory.create()
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
db_request.method = "POST"
db_request.POST["name"] = existing_organization.name
db_request.POST = MultiDict(db_request.POST)
result = views.organization_application_detail(db_request)
assert result["user"] == organization_application.submitted_by
assert result["form"].name.data == existing_organization.name
assert result["form"].name.errors != []
assert result["conflicting_applications"] == []
assert result["organization_application"] == organization_application
@pytest.mark.usefixtures("_enable_organizations")
def test_detail_is_approved_true(self, db_request):
organization_application = OrganizationApplicationFactory.create(
status=OrganizationApplicationStatus.Approved
)
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
result = views.organization_application_detail(db_request)
assert result["user"] == organization_application.submitted_by
assert result["form"].name.data == organization_application.name
assert result["conflicting_applications"] == []
assert result["organization_application"] == organization_application
@pytest.mark.usefixtures("_enable_organizations")
def test_detail_is_approved_false(self, db_request):
organization_application = OrganizationApplicationFactory.create(
status=OrganizationApplicationStatus.Declined
)
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
result = views.organization_application_detail(db_request)
assert result["user"] == organization_application.submitted_by
assert result["form"].name.data == organization_application.name
assert result["conflicting_applications"] == []
assert result["organization_application"] == organization_application
@pytest.mark.usefixtures("_enable_organizations")
@pytest.mark.parametrize(
("name", "conflicts", "conflicting_prefixes", "not_conflicting"),
[
(
"pypi",
["PyPI", "pypi"],
["pypi-common", "PyPi_rocks", "pypi-team-garbage"],
["py-pi"],
),
("py-pi", ["Py-PI", "PY-PI"], ["py", "py-pi_dot-com"], ["pypi"]),
],
)
def test_detail_conflicting_applications(
self, db_request, name, conflicts, conflicting_prefixes, not_conflicting
):
organization_application = OrganizationApplicationFactory.create(
name=name, status=OrganizationApplicationStatus.Declined
)
conflicting_applications = sorted(
[
OrganizationApplicationFactory.create(name=conflict)
for conflict in conflicts + conflicting_prefixes
],
key=lambda o: o.submitted,
)
[OrganizationApplicationFactory.create(name=name) for name in not_conflicting]
db_request.matchdict["organization_application_id"] = (
organization_application.id
)
result = views.organization_application_detail(db_request)
assert result["user"] == organization_application.submitted_by
assert result["form"].name.data == organization_application.name
assert set(result["conflicting_applications"]) == set(conflicting_applications)
assert result["organization_application"] == organization_application
@pytest.mark.usefixtures("_enable_organizations")
def test_detail_not_found(self):
organization_service = pretend.stub(
get_organization_application=lambda *a, **kw: None,
)
request = pretend.stub(
flags=pretend.stub(enabled=lambda *a: False),
find_service=lambda *a, **kw: organization_service,
matchdict={"organization_application_id": pretend.stub()},
)
with pytest.raises(HTTPNotFound):
views.organization_application_detail(request)
| TestOrganizationApplicationDetail |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 39326,
"end": 56018
} | class ____(object):
"""
This object wraps a CUDA Context resource.
Contexts should not be constructed directly by user code.
"""
def __init__(self, device, handle):
self.device = device
self.handle = handle
self.allocations = utils.UniqueDict()
self.deallocations = _PendingDeallocs()
_ensure_memory_manager()
self.memory_manager = _memory_manager(context=self)
self.modules = utils.UniqueDict()
# For storing context specific data
self.extras = {}
def reset(self):
"""
Clean up all owned resources in this context.
"""
# Free owned resources
_logger.info('reset context of device %s', self.device.id)
self.memory_manager.reset()
self.modules.clear()
# Clear trash
self.deallocations.clear()
def get_memory_info(self):
"""Returns (free, total) memory in bytes in the context.
"""
return self.memory_manager.get_memory_info()
def get_active_blocks_per_multiprocessor(self, func, blocksize, memsize,
flags=None):
"""Return occupancy of a function.
:param func: kernel for which occupancy is calculated
:param blocksize: block size the kernel is intended to be launched with
:param memsize: per-block dynamic shared memory usage intended, in bytes
"""
args = (func, blocksize, memsize, flags)
if USE_NV_BINDING:
return self._cuda_python_active_blocks_per_multiprocessor(*args)
else:
return self._ctypes_active_blocks_per_multiprocessor(*args)
def _cuda_python_active_blocks_per_multiprocessor(self, func, blocksize,
memsize, flags):
ps = [func.handle, blocksize, memsize]
if not flags:
return driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(*ps)
ps.append(flags)
return driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(*ps)
def _ctypes_active_blocks_per_multiprocessor(self, func, blocksize,
memsize, flags):
retval = c_int()
args = (byref(retval), func.handle, blocksize, memsize)
if not flags:
driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(*args)
else:
driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(*args)
return retval.value
def get_max_potential_block_size(self, func, b2d_func, memsize,
blocksizelimit, flags=None):
"""Suggest a launch configuration with reasonable occupancy.
:param func: kernel for which occupancy is calculated
:param b2d_func: function that calculates how much per-block dynamic
shared memory 'func' uses based on the block size.
Can also be the address of a C function.
Use `0` to pass `NULL` to the underlying CUDA API.
:param memsize: per-block dynamic shared memory usage intended, in bytes
:param blocksizelimit: maximum block size the kernel is designed to
handle
"""
args = (func, b2d_func, memsize, blocksizelimit, flags)
if USE_NV_BINDING:
return self._cuda_python_max_potential_block_size(*args)
else:
return self._ctypes_max_potential_block_size(*args)
def _ctypes_max_potential_block_size(self, func, b2d_func, memsize,
blocksizelimit, flags):
gridsize = c_int()
blocksize = c_int()
b2d_cb = cu_occupancy_b2d_size(b2d_func)
args = [byref(gridsize), byref(blocksize), func.handle, b2d_cb,
memsize, blocksizelimit]
if not flags:
driver.cuOccupancyMaxPotentialBlockSize(*args)
else:
args.append(flags)
driver.cuOccupancyMaxPotentialBlockSizeWithFlags(*args)
return (gridsize.value, blocksize.value)
def _cuda_python_max_potential_block_size(self, func, b2d_func, memsize,
blocksizelimit, flags):
b2d_cb = ctypes.CFUNCTYPE(c_size_t, c_int)(b2d_func)
ptr = int.from_bytes(b2d_cb, byteorder='little')
driver_b2d_cb = binding.CUoccupancyB2DSize(ptr)
args = [func.handle, driver_b2d_cb, memsize, blocksizelimit]
if not flags:
return driver.cuOccupancyMaxPotentialBlockSize(*args)
else:
args.append(flags)
return driver.cuOccupancyMaxPotentialBlockSizeWithFlags(*args)
def prepare_for_use(self):
"""Initialize the context for use.
It's safe to be called multiple times.
"""
self.memory_manager.initialize()
def push(self):
"""
Pushes this context on the current CPU Thread.
"""
driver.cuCtxPushCurrent(self.handle)
self.prepare_for_use()
def pop(self):
"""
Pops this context off the current CPU thread. Note that this context
must be at the top of the context stack, otherwise an error will occur.
"""
popped = driver.pop_active_context()
if USE_NV_BINDING:
assert int(popped) == int(self.handle)
else:
assert popped.value == self.handle.value
def memalloc(self, bytesize):
return self.memory_manager.memalloc(bytesize)
def memallocmanaged(self, bytesize, attach_global=True):
return self.memory_manager.memallocmanaged(bytesize, attach_global)
def memhostalloc(self, bytesize, mapped=False, portable=False, wc=False):
return self.memory_manager.memhostalloc(bytesize, mapped, portable, wc)
def mempin(self, owner, pointer, size, mapped=False):
if mapped and not self.device.CAN_MAP_HOST_MEMORY:
raise CudaDriverError("%s cannot map host memory" % self.device)
return self.memory_manager.mempin(owner, pointer, size, mapped)
def get_ipc_handle(self, memory):
"""
Returns an *IpcHandle* from a GPU allocation.
"""
if not SUPPORTS_IPC:
raise OSError('OS does not support CUDA IPC')
return self.memory_manager.get_ipc_handle(memory)
def open_ipc_handle(self, handle, size):
# open the IPC handle to get the device pointer
flags = 1 # CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS
if USE_NV_BINDING:
dptr = driver.cuIpcOpenMemHandle(handle, flags)
else:
dptr = drvapi.cu_device_ptr()
driver.cuIpcOpenMemHandle(byref(dptr), handle, flags)
# wrap it
return MemoryPointer(context=weakref.proxy(self), pointer=dptr,
size=size)
def enable_peer_access(self, peer_context, flags=0):
"""Enable peer access between the current context and the peer context
"""
assert flags == 0, '*flags* is reserved and MUST be zero'
driver.cuCtxEnablePeerAccess(peer_context, flags)
def can_access_peer(self, peer_device):
"""Returns a bool indicating whether the peer access between the
current and peer device is possible.
"""
if USE_NV_BINDING:
peer_device = binding.CUdevice(peer_device)
can_access_peer = driver.cuDeviceCanAccessPeer(self.device.id,
peer_device)
else:
can_access_peer = c_int()
driver.cuDeviceCanAccessPeer(byref(can_access_peer),
self.device.id, peer_device,)
return bool(can_access_peer)
def create_module_ptx(self, ptx):
if isinstance(ptx, str):
ptx = ptx.encode('utf8')
if USE_NV_BINDING:
image = ptx
else:
image = c_char_p(ptx)
return self.create_module_image(image)
def create_module_image(self, image):
module = load_module_image(self, image)
if USE_NV_BINDING:
key = module.handle
else:
key = module.handle.value
self.modules[key] = module
return weakref.proxy(module)
def unload_module(self, module):
if USE_NV_BINDING:
key = module.handle
else:
key = module.handle.value
del self.modules[key]
def get_default_stream(self):
if USE_NV_BINDING:
handle = binding.CUstream(CU_STREAM_DEFAULT)
else:
handle = drvapi.cu_stream(drvapi.CU_STREAM_DEFAULT)
return Stream(weakref.proxy(self), handle, None)
def get_legacy_default_stream(self):
if USE_NV_BINDING:
handle = binding.CUstream(binding.CU_STREAM_LEGACY)
else:
handle = drvapi.cu_stream(drvapi.CU_STREAM_LEGACY)
return Stream(weakref.proxy(self), handle, None)
def get_per_thread_default_stream(self):
if USE_NV_BINDING:
handle = binding.CUstream(binding.CU_STREAM_PER_THREAD)
else:
handle = drvapi.cu_stream(drvapi.CU_STREAM_PER_THREAD)
return Stream(weakref.proxy(self), handle, None)
def create_stream(self):
if USE_NV_BINDING:
# The default stream creation flag, specifying that the created
# stream synchronizes with stream 0 (this is different from the
# default stream, which we define also as CU_STREAM_DEFAULT when
# the NV binding is in use).
flags = binding.CUstream_flags.CU_STREAM_DEFAULT.value
handle = driver.cuStreamCreate(flags)
else:
handle = drvapi.cu_stream()
driver.cuStreamCreate(byref(handle), 0)
return Stream(weakref.proxy(self), handle,
_stream_finalizer(self.deallocations, handle))
def create_external_stream(self, ptr):
if not isinstance(ptr, int):
raise TypeError("ptr for external stream must be an int")
if USE_NV_BINDING:
handle = binding.CUstream(ptr)
else:
handle = drvapi.cu_stream(ptr)
return Stream(weakref.proxy(self), handle, None,
external=True)
def create_event(self, timing=True):
flags = 0
if not timing:
flags |= enums.CU_EVENT_DISABLE_TIMING
if USE_NV_BINDING:
handle = driver.cuEventCreate(flags)
else:
handle = drvapi.cu_event()
driver.cuEventCreate(byref(handle), flags)
return Event(weakref.proxy(self), handle,
finalizer=_event_finalizer(self.deallocations, handle))
def synchronize(self):
driver.cuCtxSynchronize()
@contextlib.contextmanager
def defer_cleanup(self):
with self.memory_manager.defer_cleanup():
with self.deallocations.disable():
yield
def __repr__(self):
return "<CUDA context %s of device %d>" % (self.handle, self.device.id)
def __eq__(self, other):
if isinstance(other, Context):
return self.handle == other.handle
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def load_module_image(context, image):
"""
image must be a pointer
"""
if USE_NV_BINDING:
return load_module_image_cuda_python(context, image)
else:
return load_module_image_ctypes(context, image)
def load_module_image_ctypes(context, image):
logsz = config.CUDA_LOG_SIZE
jitinfo = (c_char * logsz)()
jiterrors = (c_char * logsz)()
options = {
enums.CU_JIT_INFO_LOG_BUFFER: addressof(jitinfo),
enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_ERROR_LOG_BUFFER: addressof(jiterrors),
enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_LOG_VERBOSE: c_void_p(config.CUDA_VERBOSE_JIT_LOG),
}
option_keys = (drvapi.cu_jit_option * len(options))(*options.keys())
option_vals = (c_void_p * len(options))(*options.values())
handle = drvapi.cu_module()
try:
driver.cuModuleLoadDataEx(byref(handle), image, len(options),
option_keys, option_vals)
except CudaAPIError as e:
msg = "cuModuleLoadDataEx error:\n%s" % jiterrors.value.decode("utf8")
raise CudaAPIError(e.code, msg)
info_log = jitinfo.value
return CtypesModule(weakref.proxy(context), handle, info_log,
_module_finalizer(context, handle))
def load_module_image_cuda_python(context, image):
"""
image must be a pointer
"""
logsz = config.CUDA_LOG_SIZE
jitinfo = bytearray(logsz)
jiterrors = bytearray(logsz)
jit_option = binding.CUjit_option
options = {
jit_option.CU_JIT_INFO_LOG_BUFFER: jitinfo,
jit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: logsz,
jit_option.CU_JIT_ERROR_LOG_BUFFER: jiterrors,
jit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: logsz,
jit_option.CU_JIT_LOG_VERBOSE: config.CUDA_VERBOSE_JIT_LOG,
}
option_keys = [k for k in options.keys()]
option_vals = [v for v in options.values()]
try:
handle = driver.cuModuleLoadDataEx(image, len(options), option_keys,
option_vals)
except CudaAPIError as e:
err_string = jiterrors.decode('utf-8')
msg = "cuModuleLoadDataEx error:\n%s" % err_string
raise CudaAPIError(e.code, msg)
info_log = jitinfo.decode('utf-8')
return CudaPythonModule(weakref.proxy(context), handle, info_log,
_module_finalizer(context, handle))
def _alloc_finalizer(memory_manager, ptr, alloc_key, size):
allocations = memory_manager.allocations
deallocations = memory_manager.deallocations
def core():
if allocations:
del allocations[alloc_key]
deallocations.add_item(driver.cuMemFree, ptr, size)
return core
def _hostalloc_finalizer(memory_manager, ptr, alloc_key, size, mapped):
"""
Finalize page-locked host memory allocated by `context.memhostalloc`.
This memory is managed by CUDA, and finalization entails deallocation. The
issues noted in `_pin_finalizer` are not relevant in this case, and the
finalization is placed in the `context.deallocations` queue along with
finalization of device objects.
"""
allocations = memory_manager.allocations
deallocations = memory_manager.deallocations
if not mapped:
size = _SizeNotSet
def core():
if mapped and allocations:
del allocations[alloc_key]
deallocations.add_item(driver.cuMemFreeHost, ptr, size)
return core
def _pin_finalizer(memory_manager, ptr, alloc_key, mapped):
"""
Finalize temporary page-locking of host memory by `context.mempin`.
This applies to memory not otherwise managed by CUDA. Page-locking can
be requested multiple times on the same memory, and must therefore be
lifted as soon as finalization is requested, otherwise subsequent calls to
`mempin` may fail with `CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED`, leading
to unexpected behavior for the context managers `cuda.{pinned,mapped}`.
This function therefore carries out finalization immediately, bypassing the
`context.deallocations` queue.
"""
allocations = memory_manager.allocations
def core():
if mapped and allocations:
del allocations[alloc_key]
driver.cuMemHostUnregister(ptr)
return core
def _event_finalizer(deallocs, handle):
def core():
deallocs.add_item(driver.cuEventDestroy, handle)
return core
def _stream_finalizer(deallocs, handle):
def core():
deallocs.add_item(driver.cuStreamDestroy, handle)
return core
def _module_finalizer(context, handle):
dealloc = context.deallocations
modules = context.modules
if USE_NV_BINDING:
key = handle
else:
key = handle.value
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or key not in modules
driver.cuModuleUnload(handle)
dealloc.add_item(module_unload, handle)
return core
| Context |
python | huggingface__transformers | examples/modular-transformers/modeling_roberta.py | {
"start": 16417,
"end": 19175
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = RobertaAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(
hidden_states,
attention_mask,
past_key_value=past_key_value,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, _ = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_value=past_key_value,
**kwargs,
)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| RobertaLayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_run.py | {
"start": 19523,
"end": 22313
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Service without executing it. Pushes the deleted service to xcom.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_name: Required. The name of the service to create.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "region", "gcp_conn_id", "impersonation_chain", "service_name")
def __init__(
self,
project_id: str,
region: str,
service_name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_name = service_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self._validate_inputs()
def _validate_inputs(self):
missing_fields = [k for k in ["project_id", "region", "service_name"] if not getattr(self, k)]
if not self.project_id or not self.region or not self.service_name:
raise AirflowException(
f"Required parameters are missing: {missing_fields}. These parameters be passed either as "
"keyword parameter or as extra field in Airflow connection definition. Both are not set!"
)
def execute(self, context: Context):
hook: CloudRunServiceHook = CloudRunServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
service = hook.delete_service(
service_name=self.service_name,
region=self.region,
project_id=self.project_id,
)
except google.cloud.exceptions.NotFound as e:
self.log.error("An error occurred. Not Found.")
raise e
return Service.to_dict(service)
| CloudRunDeleteServiceOperator |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/pandas/output_parser.py | {
"start": 126,
"end": 773
} | class ____:
"""
Pandas instruction parser.
DEPRECATED: This class has been moved to `llama-index-experimental`.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasInstructionParser has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine.pandas import PandasInstructionParser`\n"
"Note that the PandasInstructionParser allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
| PandasInstructionParser |
python | crytic__slither | slither/slithir/operations/init_array.py | {
"start": 302,
"end": 1476
} | class ____(OperationWithLValue):
def __init__(
self, init_values: List[RVALUE], lvalue: Union[TemporaryVariableSSA, TemporaryVariable]
) -> None:
# init_values can be an array of n dimension
# reduce was removed in py3
super().__init__()
def reduce(xs):
result = True
for i in xs:
result = result and i
return result
def check(elem):
if isinstance(elem, (list,)):
return reduce(elem)
return is_valid_rvalue(elem)
assert check(init_values)
self._init_values = init_values
self._lvalue = lvalue
@property
def read(self) -> List[RVALUE]:
return self._unroll(self.init_values)
@property
def init_values(self) -> List[RVALUE]:
return list(self._init_values)
def __str__(self):
def convert(elem):
if isinstance(elem, (list,)):
return str([convert(x) for x in elem])
return f"{elem}({elem.type})"
init_values = convert(self.init_values)
return f"{self.lvalue}({self.lvalue.type}) = {init_values}"
| InitArray |
python | django-extensions__django-extensions | tests/test_runscript.py | {
"start": 6771,
"end": 10981
} | class ____(RunScriptTests):
def setUp(self):
super().setUp()
self.curwd = os.getcwd()
os.chdir(project_path)
def tearDown(self):
super().setUp()
os.chdir(self.curwd)
def _execute_script_with_chdir(
self, dir_policy, start_path, expected_path, chdir=None
):
os.chdir(os.path.join(project_path, *start_path))
expected_path = os.path.join(project_path, *expected_path)
call_command(
"runscript", "directory_checker_script", dir_policy=dir_policy, chdir=chdir
)
output = sys.stdout.getvalue().split("Script called from: ")[1]
self.assertEqual(output, expected_path + "\n")
def test_none_policy_command_run(self):
self._execute_script_with_chdir(DirPolicyChoices.NONE, [], [])
def test_none_policy_command_run_with_chdir(self):
self._execute_script_with_chdir(DirPolicyChoices.NONE, ["tests"], ["tests"])
def test_none_policy_freezing_start_directory(self):
self._execute_script_with_chdir(DirPolicyChoices.NONE, ["tests"], ["tests"])
self._execute_script_with_chdir(DirPolicyChoices.NONE, ["tests"], ["tests"])
def test_root_policy_command_run(self):
self._execute_script_with_chdir(DirPolicyChoices.ROOT, ["tests"], [])
def test_each_policy_command_run(self):
os.chdir(os.path.join(project_path, "tests"))
call_command(
"runscript",
"directory_checker_script",
"other_directory_checker_script",
dir_policy=DirPolicyChoices.EACH,
)
output = sys.stdout.getvalue()
first_output = output.split("Script called from: ")[1].split(
"Cannot import module "
)[0]
self.assertEqual(
first_output,
os.path.join(project_path, "tests", "testapp", "scripts") + "\n",
)
second_output = output.split("Script called from: ")[2].split(
"Cannot import module "
)[0]
self.assertEqual(
second_output,
os.path.join(
project_path, "tests", "testapp_with_no_models_file", "scripts"
)
+ "\n",
)
def test_chdir_specified(self):
execution_path = os.path.join(project_path, "django_extensions", "management")
self._execute_script_with_chdir(
DirPolicyChoices.ROOT,
["tests"],
["django_extensions", "management"],
chdir=execution_path,
)
@override_settings(RUNSCRIPT_CHDIR=os.path.join(project_path, "tests"))
def test_policy_from_cli_and_chdir_from_settings(self):
self._execute_script_with_chdir(DirPolicyChoices.ROOT, ["tests"], [])
@override_settings(
RUNSCRIPT_CHDIR=os.path.join(project_path, "django_extensions", "management"),
RUNSCRIPT_CHDIR_POLICY=DirPolicyChoices.ROOT,
)
def test_chdir_from_settings_and_policy_from_settings(self):
self._execute_script_with_chdir(
None, ["tests"], ["django_extensions", "management"]
)
@override_settings(RUNSCRIPT_CHDIR_POLICY=DirPolicyChoices.EACH)
def test_policy_from_settings(self):
self._execute_script_with_chdir(
None, ["tests"], ["tests", "testapp", "scripts"]
)
@override_settings(RUNSCRIPT_CHDIR=os.path.join(project_path, "tests"))
def test_chdir_django_settings(self):
self._execute_script_with_chdir(None, [], ["tests"])
@override_settings(RUNSCRIPT_CHDIR="bad path")
def test_custom_policy_django_settings_bad_path(self):
with self.assertRaisesRegex(
BadCustomDirectoryException,
"bad path is not a directory! If --dir-policy is custom than you must set "
"correct directory in --dir option or in settings.RUNSCRIPT_CHDIR",
):
self._execute_script_with_chdir(None, [], ["tests"])
def test_skip_printing_modules_which_does_not_exist(self):
call_command("runscript", "directory_checker_script")
self.assertNotIn("No module named", sys.stdout.getvalue())
self.assertNotIn("No module named", sys.stderr.getvalue())
| ChangingDirectoryTests |
python | sphinx-doc__sphinx | tests/test_builders/test_build_linkcheck.py | {
"start": 45747,
"end": 47662
} | class ____(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_HEAD(self) -> None:
self.close_connection = True
def do_GET(self) -> None:
self.send_response(200, 'OK')
self.send_header('Content-Length', '0')
self.end_headers()
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
)
def test_get_after_head_raises_connection_error(app: SphinxTestApp) -> None:
with serve_application(app, ConnectionResetHandler) as address:
app.build()
content = (app.outdir / 'output.txt').read_text(encoding='utf8')
assert not content
content = (app.outdir / 'output.json').read_text(encoding='utf8')
assert json.loads(content) == {
'filename': 'index.rst',
'lineno': 1,
'status': 'working',
'code': 0,
'uri': f'http://{address}/',
'info': '',
}
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-documents_exclude',
freshenv=True,
)
def test_linkcheck_exclude_documents(app: SphinxTestApp) -> None:
with serve_application(app, DefaultsHandler):
app.build()
with open(app.outdir / 'output.json', encoding='utf-8') as fp:
content = [json.loads(record) for record in fp]
assert len(content) == 2
assert {
'filename': 'broken_link.rst',
'lineno': 4,
'status': 'ignored',
'code': 0,
'uri': 'https://www.sphinx-doc.org/this-is-a-broken-link',
'info': 'broken_link matched ^broken_link$ from linkcheck_exclude_documents',
} in content
assert {
'filename': 'br0ken_link.rst',
'lineno': 4,
'status': 'ignored',
'code': 0,
'uri': 'https://www.sphinx-doc.org/this-is-another-broken-link',
'info': 'br0ken_link matched br[0-9]ken_link from linkcheck_exclude_documents',
} in content
| ConnectionResetHandler |
python | tensorflow__tensorflow | tensorflow/python/ops/image_grad_test_base.py | {
"start": 13487,
"end": 16185
} | class ____(test.TestCase):
"""Tests scale and translate op."""
def testGrads(self):
in_shape = [1, 2, 3, 1]
out_shape = [1, 4, 6, 1]
x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
kernel_types = [
'lanczos1', 'lanczos3', 'lanczos5', 'gaussian', 'box', 'triangle',
'keyscubic', 'mitchellcubic'
]
scales = [(1.0, 1.0), (0.37, 0.47), (2.1, 2.1)]
translations = [(0.0, 0.0), (3.14, 1.19), (2.1, 3.1), (100.0, 200.0)]
for scale in scales:
for translation in translations:
for kernel_type in kernel_types:
for antialias in [True, False]:
with self.cached_session():
input_tensor = constant_op.constant(x, shape=in_shape)
def scale_trans(input_tensor,
scale=scale,
translation=translation,
kernel_type=kernel_type,
antialias=antialias):
# pylint: disable=cell-var-from-loop
return image_ops.scale_and_translate(
input_tensor,
out_shape[1:3],
scale=constant_op.constant(scale),
translation=constant_op.constant(translation),
kernel_type=kernel_type,
antialias=antialias)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(scale_trans,
[input_tensor]))
self.assertLess(err, 1e-3)
def testIdentityGrads(self):
"""Tests that Gradients for 1.0 scale should be ones for some kernels."""
in_shape = [1, 2, 3, 1]
out_shape = [1, 4, 6, 1]
x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
kernel_types = ['lanczos1', 'lanczos3', 'lanczos5', 'triangle', 'keyscubic']
scale = (1.0, 1.0)
translation = (0.0, 0.0)
antialias = True
for kernel_type in kernel_types:
with self.cached_session():
input_tensor = constant_op.constant(x, shape=in_shape)
with backprop.GradientTape() as tape:
tape.watch(input_tensor)
scale_and_translate_out = image_ops.scale_and_translate(
input_tensor,
out_shape[1:3],
scale=constant_op.constant(scale),
translation=constant_op.constant(translation),
kernel_type=kernel_type,
antialias=antialias)
grad = tape.gradient(scale_and_translate_out, input_tensor)[0]
grad_v = self.evaluate(grad)
self.assertAllClose(np.ones_like(grad_v), grad_v)
| ScaleAndTranslateOpTestBase |
python | getsentry__sentry | src/sentry/integrations/api/serializers/rest_framework/data_forwarder.py | {
"start": 810,
"end": 876
} | class ____(TypedDict, total=False):
write_key: str
| SegmentConfig |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-of-connectable-servers-in-a-weighted-tree-network.py | {
"start": 1999,
"end": 3081
} | class ____(object):
def countPairsOfConnectableServers(self, edges, signalSpeed):
"""
:type edges: List[List[int]]
:type signalSpeed: int
:rtype: List[int]
"""
def bfs(u, p, dist):
result = 0
q = [(u, p, dist)]
while q:
new_q = []
for u, p, dist in q:
if dist%signalSpeed == 0:
result += 1
for v, w in adj[u]:
if v == p:
continue
new_q.append((v, u, dist+w))
q = new_q
return result
adj = [[] for _ in xrange(len(edges)+1)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
result = [0]*(len(edges)+1)
for u in xrange(len(result)):
curr = 0
for v, w in adj[u]:
cnt = bfs(v, u, w)
result[u] += curr*cnt
curr += cnt
return result
| Solution3 |
python | keras-team__keras | keras/src/layers/convolutional/separable_conv_test.py | {
"start": 6551,
"end": 11866
} | class ____(testing.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
)
def test_separable_conv1d(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.SeparableConv1D(
depth_multiplier=depth_multiplier,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
depthwise_kernel_shape = layer.depthwise_kernel.shape
depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
layer.depthwise_kernel.assign(depthwise_kernel_weights)
pointwise_kernel_shape = layer.pointwise_kernel.shape
pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
layer.pointwise_kernel.assign(pointwise_kernel_weights)
bias_weights = np.random.normal(size=(filters,))
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected_depthwise = np_depthwise_conv1d(
inputs,
depthwise_kernel_weights,
np.zeros(4 * depth_multiplier),
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
expected = np_conv1d(
expected_depthwise,
pointwise_kernel_weights,
bias_weights,
strides=1,
padding=padding,
data_format=data_format,
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
)
def test_separable_conv2d(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.SeparableConv2D(
depth_multiplier=depth_multiplier,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
depthwise_kernel_shape = layer.depthwise_kernel.shape
depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
layer.depthwise_kernel.assign(depthwise_kernel_weights)
pointwise_kernel_shape = layer.pointwise_kernel.shape
pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
layer.pointwise_kernel.assign(pointwise_kernel_weights)
bias_weights = np.random.normal(size=(filters,))
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected_depthwise = np_depthwise_conv2d(
inputs,
depthwise_kernel_weights,
np.zeros(4 * depth_multiplier),
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
expected = np_conv2d(
expected_depthwise,
pointwise_kernel_weights,
bias_weights,
strides=1,
padding=padding,
data_format=data_format,
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
| SeparableConvCorrectnessTest |
python | joke2k__faker | faker/providers/company/fr_FR/__init__.py | {
"start": 131,
"end": 22924
} | class ____(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}}",
"{{last_name}}",
)
catch_phrase_formats = ("{{catch_phrase_noun}} {{catch_phrase_verb}} {{catch_phrase_attribute}}",)
nouns = (
"la sécurité",
"le plaisir",
"le confort",
"la simplicité",
"l'assurance",
"l'art",
"le pouvoir",
"le droit",
"la possibilité",
"l'avantage",
"la liberté",
)
verbs = (
"de rouler",
"d'avancer",
"d'évoluer",
"de changer",
"d'innover",
"de louer",
"d'atteindre vos buts",
"de concrétiser vos projets",
)
attributes = (
"de manière efficace",
"plus rapidement",
"plus facilement",
"plus simplement",
"en toute tranquilité",
"avant-tout",
"autrement",
"naturellement",
"à la pointe",
"sans soucis",
"à l'état pur",
"à sa source",
"de manière sûre",
"en toute sécurité",
)
company_suffixes: Tuple[str, ...] = (
"SA",
"S.A.",
"SARL",
"S.A.R.L.",
"S.A.S.",
"et Fils",
)
siren_format = "### ### ###"
# Data from:
# https://www.insee.fr/fr/information/2120875
# fmt: off
ape_codes_naf_2003 = [
"01.11Z", "01.12Z", "01.13Z", "01.14Z", "01.15Z", "01.16Z", "01.19Z",
"01.21Z", "01.22Z", "01.23Z", "01.24Z", "01.25Z", "01.26Z", "01.27Z",
"01.28Z", "01.29Z", "01.30Z", "01.41Z", "01.42Z", "01.43Z", "01.44Z",
"01.45Z", "01.46Z", "01.47Z", "01.49Z", "01.50Z", "01.61Z", "01.62Z",
"01.63Z", "01.64Z", "01.70Z", "02.10Z", "02.20Z", "02.30Z", "02.40Z",
"03.11Z", "03.12Z", "03.21Z", "03.22Z", "05.10Z", "05.20Z", "06.10Z",
"06.20Z", "07.10Z", "07.21Z", "07.29Z", "08.11Z", "08.12Z", "08.91Z",
"08.92Z", "08.93Z", "08.99Z", "09.10Z", "09.90Z", "10.11Z", "10.12Z",
"10.13A", "10.13B", "10.20Z", "10.31Z", "10.32Z", "10.39A", "10.39B",
"10.41A", "10.41B", "10.42Z", "10.51A", "10.51B", "10.51C", "10.51D",
"10.52Z", "10.61A", "10.61B", "10.62Z", "10.71A", "10.71B", "10.71C",
"10.71D", "10.72Z", "10.73Z", "10.81Z", "10.82Z", "10.83Z", "10.84Z",
"10.85Z", "10.86Z", "10.89Z", "10.91Z", "10.92Z", "11.01Z", "11.02A",
"11.02B", "11.03Z", "11.04Z", "11.05Z", "11.06Z", "11.07A", "11.07B",
"12.00Z", "13.10Z", "13.20Z", "13.30Z", "13.91Z", "13.92Z", "13.93Z",
"13.94Z", "13.95Z", "13.96Z", "13.99Z", "14.11Z", "14.12Z", "14.13Z",
"14.14Z", "14.19Z", "14.20Z", "14.31Z", "14.39Z", "15.11Z", "15.12Z",
"15.20Z", "16.10A", "16.10B", "16.21Z", "16.22Z", "16.23Z", "16.24Z",
"16.29Z", "17.11Z", "17.12Z", "17.21A", "17.21B", "17.21C", "17.22Z",
"17.23Z", "17.24Z", "17.29Z", "18.11Z", "18.12Z", "18.13Z", "18.14Z",
"18.20Z", "19.10Z", "19.20Z", "20.11Z", "20.12Z", "20.13A", "20.13B",
"20.14Z", "20.15Z", "20.16Z", "20.17Z", "20.20Z", "20.30Z", "20.41Z",
"20.42Z", "20.51Z", "20.52Z", "20.53Z", "20.59Z", "20.60Z", "21.10Z",
"21.20Z", "22.11Z", "22.19Z", "22.21Z", "22.22Z", "22.23Z", "22.29A",
"22.29B", "23.11Z", "23.12Z", "23.13Z", "23.14Z", "23.19Z", "23.20Z",
"23.31Z", "23.32Z", "23.41Z", "23.42Z", "23.43Z", "23.44Z", "23.49Z",
"23.51Z", "23.52Z", "23.61Z", "23.62Z", "23.63Z", "23.64Z", "23.65Z",
"23.69Z", "23.70Z", "23.91Z", "23.99Z", "24.10Z", "24.20Z", "24.31Z",
"24.32Z", "24.33Z", "24.34Z", "24.41Z", "24.42Z", "24.43Z", "24.44Z",
"24.45Z", "24.46Z", "24.51Z", "24.52Z", "24.53Z", "24.54Z", "25.11Z",
"25.12Z", "25.21Z", "25.29Z", "25.30Z", "25.40Z", "25.50A", "25.50B",
"25.61Z", "25.62A", "25.62B", "25.71Z", "25.72Z", "25.73A", "25.73B",
"25.91Z", "25.92Z", "25.93Z", "25.94Z", "25.99A", "25.99B", "26.11Z",
"26.12Z", "26.20Z", "26.30Z", "26.40Z", "26.51A", "26.51B", "26.52Z",
"26.60Z", "26.70Z", "26.80Z", "27.11Z", "27.12Z", "27.20Z", "27.31Z",
"27.32Z", "27.33Z", "27.40Z", "27.51Z", "27.52Z", "27.90Z", "28.11Z",
"28.12Z", "28.13Z", "28.14Z", "28.15Z", "28.21Z", "28.22Z", "28.23Z",
"28.24Z", "28.25Z", "28.29A", "28.29B", "28.30Z", "28.41Z", "28.49Z",
"28.91Z", "28.92Z", "28.93Z", "28.94Z", "28.95Z", "28.96Z", "28.99A",
"28.99B", "29.10Z", "29.20Z", "29.31Z", "29.32Z", "30.11Z", "30.12Z",
"30.20Z", "30.30Z", "30.40Z", "30.91Z", "30.92Z", "30.99Z", "31.01Z",
"31.02Z", "31.03Z", "31.09A", "31.09B", "32.11Z", "32.12Z", "32.13Z",
"32.20Z", "32.30Z", "32.40Z", "32.50A", "32.50B", "32.91Z", "32.99Z",
"33.11Z", "33.12Z", "33.13Z", "33.14Z", "33.15Z", "33.16Z", "33.17Z",
"33.19Z", "33.20A", "33.20B", "33.20C", "33.20D", "35.11Z", "35.12Z",
"35.13Z", "35.14Z", "35.21Z", "35.22Z", "35.23Z", "35.30Z", "36.00Z",
"37.00Z", "38.11Z", "38.12Z", "38.21Z", "38.22Z", "38.31Z", "38.32Z",
"39.00Z", "41.10A", "41.10B", "41.10C", "41.10D", "41.20A", "41.20B",
"42.11Z", "42.12Z", "42.13A", "42.13B", "42.21Z", "42.22Z", "42.91Z",
"42.99Z", "43.11Z", "43.12A", "43.12B", "43.13Z", "43.21A", "43.21B",
"43.22A", "43.22B", "43.29A", "43.29B", "43.31Z", "43.32A", "43.32B",
"43.32C", "43.33Z", "43.34Z", "43.39Z", "43.91A", "43.91B", "43.99A",
"43.99B", "43.99C", "43.99D", "43.99E", "45.11Z", "45.19Z", "45.20A",
"45.20B", "45.31Z", "45.32Z", "45.40Z", "46.11Z", "46.12A", "46.12B",
"46.13Z", "46.14Z", "46.15Z", "46.16Z", "46.17A", "46.17B", "46.18Z",
"46.19A", "46.19B", "46.21Z", "46.22Z", "46.23Z", "46.24Z", "46.31Z",
"46.32A", "46.32B", "46.32C", "46.33Z", "46.34Z", "46.35Z", "46.36Z",
"46.37Z", "46.38A", "46.38B", "46.39A", "46.39B", "46.41Z", "46.42Z",
"46.43Z", "46.44Z", "46.45Z", "46.46Z", "46.47Z", "46.48Z", "46.49Z",
"46.51Z", "46.52Z", "46.61Z", "46.62Z", "46.63Z", "46.64Z", "46.65Z",
"46.66Z", "46.69A", "46.69B", "46.69C", "46.71Z", "46.72Z", "46.73A",
"46.73B", "46.74A", "46.74B", "46.75Z", "46.76Z", "46.77Z", "46.90Z",
"47.11A", "47.11B", "47.11C", "47.11D", "47.11E", "47.11F", "47.19A",
"47.19B", "47.21Z", "47.22Z", "47.23Z", "47.24Z", "47.25Z", "47.26Z",
"47.29Z", "47.30Z", "47.41Z", "47.42Z", "47.43Z", "47.51Z", "47.52A",
"47.52B", "47.53Z", "47.54Z", "47.59A", "47.59B", "47.61Z", "47.62Z",
"47.63Z", "47.64Z", "47.65Z", "47.71Z", "47.72A", "47.72B", "47.73Z",
"47.74Z", "47.75Z", "47.76Z", "47.77Z", "47.78A", "47.78B", "47.78C",
"47.79Z", "47.81Z", "47.82Z", "47.89Z", "47.91A", "47.91B", "47.99A",
"47.99B", "49.10Z", "49.20Z", "49.31Z", "49.32Z", "49.39A", "49.39B",
"49.39C", "49.41A", "49.41B", "49.41C", "49.42Z", "49.50Z", "50.10Z",
"50.20Z", "50.30Z", "50.40Z", "51.10Z", "51.21Z", "51.22Z", "52.10A",
"52.10B", "52.21Z", "52.22Z", "52.23Z", "52.24A", "52.24B", "52.29A",
"52.29B", "53.10Z", "53.20Z", "55.10Z", "55.20Z", "55.30Z", "55.90Z",
"56.10A", "56.10B", "56.10C", "56.21Z", "56.29A", "56.29B", "56.30Z",
"58.11Z", "58.12Z", "58.13Z", "58.14Z", "58.19Z", "58.21Z", "58.29A",
"58.29B", "58.29C", "59.11A", "59.11B", "59.11C", "59.12Z", "59.13A",
"59.13B", "59.14Z", "59.20Z", "60.10Z", "60.20A", "60.20B", "61.10Z",
"61.20Z", "61.30Z", "61.90Z", "62.01Z", "62.02A", "62.02B", "62.03Z",
"62.09Z", "63.11Z", "63.12Z", "63.91Z", "63.99Z", "64.11Z", "64.19Z",
"64.20Z", "64.30Z", "64.91Z", "64.92Z", "64.99Z", "65.11Z", "65.12Z",
"65.20Z", "65.30Z", "66.11Z", "66.12Z", "66.19A", "66.19B", "66.21Z",
"66.22Z", "66.29Z", "66.30Z", "68.10Z", "68.20A", "68.20B", "68.31Z",
"68.32A", "68.32B", "69.10Z", "69.20Z", "70.10Z", "70.21Z", "70.22Z",
"71.11Z", "71.12A", "71.12B", "71.20A", "71.20B", "72.11Z", "72.19Z",
"72.20Z", "73.11Z", "73.12Z", "73.20Z", "74.10Z", "74.20Z", "74.30Z",
"74.90A", "74.90B", "75.00Z", "77.11A", "77.11B", "77.12Z", "77.21Z",
"77.22Z", "77.29Z", "77.31Z", "77.32Z", "77.33Z", "77.34Z", "77.35Z",
"77.39Z", "77.40Z", "78.10Z", "78.20Z", "78.30Z", "79.11Z", "79.12Z",
"79.90Z", "80.10Z", "80.20Z", "80.30Z", "81.10Z", "81.21Z", "81.22Z",
"81.29A", "81.29B", "81.30Z", "82.11Z", "82.19Z", "82.20Z", "82.30Z",
"82.91Z", "82.92Z", "82.99Z", "84.11Z", "84.12Z", "84.13Z", "84.21Z",
"84.22Z", "84.23Z", "84.24Z", "84.25Z", "84.30A", "84.30B", "84.30C",
"85.10Z", "85.20Z", "85.31Z", "85.32Z", "85.41Z", "85.42Z", "85.51Z",
"85.52Z", "85.53Z", "85.59A", "85.59B", "85.60Z", "86.10Z", "86.21Z",
"86.22A", "86.22B", "86.22C", "86.23Z", "86.90A", "86.90B", "86.90C",
"86.90D", "86.90E", "86.90F", "87.10A", "87.10B", "87.10C", "87.20A",
"87.20B", "87.30A", "87.30B", "87.90A", "87.90B", "88.10A", "88.10B",
"88.10C", "88.91A", "88.91B", "88.99A", "88.99B", "90.01Z", "90.02Z",
"90.03A", "90.03B", "90.04Z", "91.01Z", "91.02Z", "91.03Z", "91.04Z",
"92.00Z", "93.11Z", "93.12Z", "93.13Z", "93.19Z", "93.21Z", "93.29Z",
"94.11Z", "94.12Z", "94.20Z", "94.91Z", "94.92Z", "94.99Z", "95.11Z",
"95.12Z", "95.21Z", "95.22Z", "95.23Z", "95.24Z", "95.25Z", "95.29Z",
"96.01A", "96.01B", "96.02A", "96.02B", "96.03Z", "96.04Z", "96.09Z",
"97.00Z", "98.10Z", "98.20Z", "99.00Z",
]
# fmt: on
# Data from:
# https://www.insee.fr/fr/information/8181066
# fmt: off
ape_codes_naf_2025 = [
"01.11Y", "01.12Y", "01.13Y", "01.14Y", "01.15Y", "01.16Y", "01.19Y",
"01.21Y", "01.22Y", "01.23Y", "01.24Y", "01.25Y", "01.26Y", "01.27Y",
"01.28Y", "01.29Y", "01.30Y", "01.41Y", "01.42Y", "01.43Y", "01.44Y",
"01.45Y", "01.46Y", "01.47Y", "01.48G", "01.48H", "01.48J", "01.50Y",
"01.61Y", "01.62Y", "01.63Y", "01.70Y", "02.10Y", "02.20Y", "02.30Y",
"02.40Y", "03.11Y", "03.12Y", "03.21Y", "03.22Y", "03.30Y", "05.10Y",
"05.20Y", "06.10Y", "06.20Y", "07.10Y", "07.21Y", "07.29Y", "08.11Y",
"08.12Y", "08.91Y", "08.92Y", "08.93Y", "08.99Y", "09.10Y", "09.90Y",
"10.11Y", "10.12Y", "10.13G", "10.13H", "10.20Y", "10.31Y", "10.32Y",
"10.39G", "10.39H", "10.41Y", "10.42Y", "10.51G", "10.51H", "10.51J",
"10.52Y", "10.61G", "10.61H", "10.62Y", "10.71G", "10.71H", "10.71J",
"10.72Y", "10.73Y", "10.81Y", "10.82Y", "10.83Y", "10.84Y", "10.85Y",
"10.86Y", "10.89Y", "10.91Y", "10.92Y", "11.01Y", "11.02G", "11.02H",
"11.03Y", "11.04Y", "11.05Y", "11.06Y", "11.07G", "11.07H", "12.00Y",
"13.10Y", "13.20Y", "13.30Y", "13.91Y", "13.92Y", "13.93Y", "13.94Y",
"13.95Y", "13.96Y", "13.99Y", "14.10Y", "14.21Y", "14.22Y", "14.23Y",
"14.24Y", "14.29Y", "15.11Y", "15.12Y", "15.20Y", "16.11Y", "16.12Y",
"16.21Y", "16.22Y", "16.23Y", "16.24Y", "16.25Y", "16.26Y", "16.27Y",
"16.28Y", "17.11Y", "17.12Y", "17.21Y", "17.22Y", "17.23Y", "17.24Y",
"17.25Y", "18.11Y", "18.12Y", "18.13Y", "18.14Y", "18.20Y", "19.10Y",
"19.20Y", "20.11Y", "20.12Y", "20.13Y", "20.14Y", "20.15Y", "20.16Y",
"20.17Y", "20.20Y", "20.30Y", "20.41Y", "20.42Y", "20.51Y", "20.59Y",
"20.60Y", "21.10Y", "21.20Y", "22.11Y", "22.12Y", "22.21Y", "22.22Y",
"22.23Y", "22.24Y", "22.25Y", "22.26Y", "23.11Y", "23.12Y", "23.13Y",
"23.14Y", "23.15Y", "23.20Y", "23.31Y", "23.32Y", "23.41Y", "23.42Y",
"23.43Y", "23.44Y", "23.45Y", "23.51Y", "23.52Y", "23.61Y", "23.62Y",
"23.63Y", "23.64Y", "23.65Y", "23.66Y", "23.70Y", "23.91Y", "23.99Y",
"24.10Y", "24.20Y", "24.31Y", "24.32Y", "24.33Y", "24.34Y", "24.41Y",
"24.42Y", "24.43Y", "24.44Y", "24.45Y", "24.46Y", "24.51Y", "24.52Y",
"24.53Y", "24.54Y", "25.11Y", "25.12Y", "25.21Y", "25.22Y", "25.30Y",
"25.40Y", "25.51Y", "25.52Y", "25.53Y", "25.61Y", "25.62Y", "25.63Y",
"25.91Y", "25.92Y", "25.93Y", "25.94Y", "25.99Y", "26.11Y", "26.12Y",
"26.20Y", "26.30Y", "26.40Y", "26.51Y", "26.52Y", "26.60Y", "26.70Y",
"27.11Y", "27.12Y", "27.20Y", "27.31Y", "27.32Y", "27.33Y", "27.40Y",
"27.51Y", "27.52Y", "27.90Y", "28.11Y", "28.12Y", "28.13G", "28.13H",
"28.14Y", "28.15Y", "28.21Y", "28.22Y", "28.23Y", "28.24Y", "28.25Y",
"28.29Y", "28.30Y", "28.41Y", "28.42Y", "28.91Y", "28.92Y", "28.93Y",
"28.94Y", "28.95Y", "28.96Y", "28.97Y", "28.99Y", "29.10Y", "29.20Y",
"29.31Y", "29.32Y", "30.11Y", "30.12Y", "30.13Y", "30.20Y", "30.31Y",
"30.32Y", "30.40Y", "30.91Y", "30.92Y", "30.99Y", "31.00G", "31.00H",
"31.00J", "32.11Y", "32.12Y", "32.13Y", "32.20Y", "32.30Y", "32.40Y",
"32.50Y", "32.91Y", "32.99Y", "33.11Y", "33.12Y", "33.13Y", "33.14Y",
"33.15Y", "33.16Y", "33.17Y", "33.18G", "33.18H", "33.19Y", "33.20Y",
"35.11Y", "35.12Y", "35.13Y", "35.14Y", "35.15G", "35.15H", "35.16Y",
"35.21Y", "35.22Y", "35.23Y", "35.24Y", "35.30Y", "35.40Y", "36.00Y",
"37.00Y", "38.11Y", "38.12Y", "38.21Y", "38.22Y", "38.23Y", "38.31Y",
"38.32Y", "38.33Y", "39.00Y", "41.00G", "41.00H", "42.11Y", "42.12Y",
"42.13G", "42.13H", "42.21Y", "42.22Y", "42.91Y", "42.99Y", "43.11Y",
"43.12G", "43.12H", "43.13Y", "43.21G", "43.21H", "43.22G", "43.22H",
"43.23Y", "43.24Y", "43.31Y", "43.32G", "43.32H", "43.33Y", "43.34G",
"43.34H", "43.35Y", "43.41G", "43.41H", "43.41J", "43.42G", "43.42H",
"43.42J", "43.50Y", "43.60Y", "43.91Y", "43.99G", "43.99H", "46.11Y",
"46.12Y", "46.13Y", "46.14Y", "46.15Y", "46.16Y", "46.17G", "46.17H",
"46.18Y", "46.19G", "46.19H", "46.21Y", "46.22Y", "46.23Y", "46.24Y",
"46.31Y", "46.32G", "46.32H", "46.33Y", "46.34Y", "46.35Y", "46.36Y",
"46.37Y", "46.38Y", "46.39Y", "46.41Y", "46.42Y", "46.43G", "46.43H",
"46.44Y", "46.45Y", "46.46Y", "46.47Y", "46.48Y", "46.49Y", "46.50Y",
"46.61Y", "46.62Y", "46.63Y", "46.64G", "46.64H", "46.64J", "46.64K",
"46.71G", "46.71H", "46.72Y", "46.73Y", "46.81Y", "46.82Y", "46.83G",
"46.83H", "46.83J", "46.84G", "46.84H", "46.85Y", "46.86Y", "46.87Y",
"46.89Y", "46.90Y", "47.11G", "47.11H", "47.11J", "47.11K", "47.11L",
"47.12G", "47.12H", "47.21Y", "47.22Y", "47.23Y", "47.24Y", "47.25Y",
"47.26Y", "47.27G", "47.27H", "47.30Y", "47.40Y", "47.51Y", "47.52G",
"47.52H", "47.53Y", "47.54Y", "47.55G", "47.55H", "47.61Y", "47.62Y",
"47.63Y", "47.64Y", "47.69Y", "47.71Y", "47.72G", "47.72H", "47.73Y",
"47.74G", "47.74H", "47.75Y", "47.76Y", "47.77Y", "47.78G", "47.78H",
"47.79G", "47.79H", "47.81Y", "47.82Y", "47.83Y", "47.91Y", "47.92G",
"47.92H", "47.92J", "49.11Y", "49.12Y", "49.20Y", "49.31G", "49.31H",
"49.32Y", "49.33G", "49.33H", "49.34Y", "49.39Y", "49.41G", "49.41H",
"49.41J", "49.42Y", "49.50Y", "50.10Y", "50.20Y", "50.30Y", "50.40Y",
"51.10Y", "51.21Y", "51.22Y", "52.10G", "52.10H", "52.21Y", "52.22Y",
"52.23Y", "52.24G", "52.24H", "52.25Y", "52.26Y", "52.31Y", "52.32Y",
"53.10Y", "53.20G", "53.20H", "53.30Y", "55.10Y", "55.20Y", "55.30Y",
"55.40Y", "55.90Y", "56.11G", "56.11H", "56.11J", "56.12Y", "56.21Y",
"56.22Y", "56.30Y", "56.40Y", "58.11Y", "58.12Y", "58.13Y", "58.19Y",
"58.21Y", "58.29Y", "59.11G", "59.11H", "59.11J", "59.11K", "59.12Y",
"59.13Y", "59.14Y", "59.20Y", "60.10Y", "60.20G", "60.20H", "60.31Y",
"60.39Y", "61.10Y", "61.20Y", "61.90Y", "62.10Y", "62.20G", "62.20H",
"62.90Y", "63.10Y", "63.91Y", "63.92Y", "64.11Y", "64.19Y", "64.21Y",
"64.22Y", "64.31Y", "64.32Y", "64.91Y", "64.92Y", "64.99Y", "65.11Y",
"65.12Y", "65.20Y", "65.30Y", "66.11Y", "66.12Y", "66.19G", "66.19H",
"66.21Y", "66.22Y", "66.29Y", "66.30Y", "68.11Y", "68.12Y", "68.20G",
"68.20H", "68.31Y", "68.32G", "68.32H", "69.10Y", "69.20Y", "70.10Y",
"70.20Y", "71.11Y", "71.12Y", "71.20G", "71.20H", "72.10G", "72.10H",
"72.20Y", "73.11Y", "73.12Y", "73.20Y", "73.30Y", "74.11Y", "74.12Y",
"74.13Y", "74.14Y", "74.20Y", "74.30Y", "74.91Y", "74.99Y", "75.00Y",
"77.11Y", "77.12Y", "77.21Y", "77.22Y", "77.31Y", "77.32Y", "77.33Y",
"77.34Y", "77.35Y", "77.39Y", "77.40G", "77.40H", "77.51Y", "77.52Y",
"78.10Y", "78.20G", "78.20H", "79.11Y", "79.12Y", "79.90Y", "80.01Y",
"80.09Y", "81.10Y", "81.21Y", "81.22Y", "81.23G", "81.23H", "81.30Y",
"82.10Y", "82.20Y", "82.30Y", "82.40Y", "82.91Y", "82.92Y", "82.99Y",
"84.11Y", "84.12Y", "84.13Y", "84.21Y", "84.22Y", "84.23Y", "84.24Y",
"84.25Y", "84.30G", "84.30H", "84.30J", "85.10Y", "85.20Y", "85.31Y",
"85.32Y", "85.33Y", "85.40Y", "85.51Y", "85.52Y", "85.53Y", "85.59G",
"85.59H", "85.61Y", "85.69Y", "86.10Y", "86.21Y", "86.22Y", "86.23Y",
"86.91Y", "86.92Y", "86.93Y", "86.94G", "86.94H", "86.95Y", "86.96Y",
"86.97Y", "86.99Y", "87.10G", "87.10H", "87.10J", "87.20G", "87.20H",
"87.30G", "87.30H", "87.91Y", "87.99G", "87.99H", "88.10G", "88.10H",
"88.10J", "88.91G", "88.91H", "88.91J", "88.99G", "88.99H", "90.11Y",
"90.12Y", "90.13Y", "90.20Y", "90.31G", "90.31H", "90.39G", "90.39H",
"91.11Y", "91.12Y", "91.21Y", "91.22Y", "91.30Y", "91.41Y", "91.42Y",
"92.00Y", "93.11Y", "93.12Y", "93.13Y", "93.19Y", "93.21Y", "93.29Y",
"94.11Y", "94.12Y", "94.20Y", "94.91Y", "94.92Y", "94.99Y", "95.10Y",
"95.21Y", "95.22Y", "95.23Y", "95.24Y", "95.25Y", "95.29G", "95.29H",
"95.31G", "95.31H", "95.32Y", "95.40Y", "96.10G", "96.10H", "96.21G",
"96.21H", "96.22Y", "96.23Y", "96.30Y", "96.40Y", "96.91Y", "96.99G",
"96.99H", "97.00Y", "98.10Y", "98.20Y", "99.00Y",
]
# fmt: on
def catch_phrase_noun(self) -> str:
"""
Returns a random catch phrase noun.
"""
return self.random_element(self.nouns)
def catch_phrase_attribute(self) -> str:
"""
Returns a random catch phrase attribute.
"""
return self.random_element(self.attributes)
def catch_phrase_verb(self) -> str:
"""
Returns a random catch phrase verb.
"""
return self.random_element(self.verbs)
def catch_phrase(self) -> str:
"""
:example: 'integrate extensible convergence'
"""
catch_phrase = ""
while True:
pattern: str = self.random_element(self.catch_phrase_formats)
catch_phrase = self.generator.parse(pattern)
catch_phrase = catch_phrase[0].upper() + catch_phrase[1:]
if self._is_catch_phrase_valid(catch_phrase):
break
return catch_phrase
# An array containing string which should not appear twice in a catch phrase
words_which_should_not_appear_twice = ("sécurité", "simpl")
def _is_catch_phrase_valid(self, catch_phrase: str) -> bool:
"""
Validates a french catch phrase.
:param catch_phrase: The catch phrase to validate.
"""
for word in self.words_which_should_not_appear_twice:
# Fastest way to check if a piece of word does not appear twice.
begin_pos = catch_phrase.find(word)
end_pos = catch_phrase.find(word, begin_pos + 1)
if begin_pos != -1 and begin_pos != end_pos:
return False
return True
def siren(self) -> str:
"""
Generates a siren number (9 digits). Formatted as '### ### ###'.
"""
code = self.numerify("########")
luhn_checksum = str(calculate_luhn(float(code)))
return f"{code[:3]} {code[3:6]} {code[6:]}{luhn_checksum}"
def siret(self, max_sequential_digits: int = 2) -> str:
"""
Generates a siret number (14 digits).
It is in fact the result of the concatenation of a siren number (9 digits),
a sequential number (4 digits) and a control number (1 digit) concatenation.
If $max_sequential_digits is invalid, it is set to 2.
The siret number is formatted as '### ### ### #####'.
:param max_sequential_digits The maximum number of digits for the sequential number (> 0 && <= 4).
"""
if max_sequential_digits > 4 or max_sequential_digits <= 0:
max_sequential_digits = 2
sequential_number = str(self.random_number(max_sequential_digits)).zfill(4)
code = self.siren().replace(" ", "") + sequential_number
luhn_checksum = str(calculate_luhn(float(code)))
return f"{code[:3]} {code[3:6]} {code[6:9]} {code[9:]}{luhn_checksum}"
def company_vat(self, siren: str = "") -> str:
"""
Generate a valid TVA (French VAT) number.
It is the concatenation of "FR", siren checksum and siren number
:param siren: Force SIREN number
:sample:
:sample: siren="123 456 789"
"""
siren = siren or self.siren()
siren_int = int("".join(c for c in siren if c.isdigit()))
checksum = (12 + 3 * (siren_int % 97)) % 97
return f"FR {checksum:02} {siren}"
def ape_code(self, version: Optional[str] = "naf-2003") -> str:
"""
Generate an APE code (also known as NAF code).
It identify french company main branch of activity.
It provide numbers from nomenclature `version` `naf-2003` (default)
or `naf-2025`.
To have it generate a truly random (and possibly invalid number) set
`version` to `None`
:param version: Set to ``"naf-2003"`` to return a valid NAF 2003 APE code.
Set to ``"naf-2025"`` to return a valid NAF 2025 APE code.
Set to ``None`` to return a truly random and possibly invalid number
Defaults to ``"naf-2003"``
:param letter: Force letter
:param siren: Force SIREN
:sample:
:sample: version="naf-2003"
:sample: version="naf-2025"
:sample: version=None
"""
if version is None:
numbers = self.numerify("##.##")
letter = self.random_uppercase_letter()
return f"{numbers}{letter}"
if version == "naf-2003":
return self.random_element(self.ape_codes_naf_2003)
if version == "naf-2025":
return self.random_element(self.ape_codes_naf_2025)
raise ValueError("Unsupported NAF version. Set version=None to a truly random number.")
| Provider |
python | mlflow__mlflow | mlflow/server/fastapi_security.py | {
"start": 678,
"end": 1928
} | class ____:
"""Middleware to validate Host headers using fnmatch patterns."""
def __init__(self, app: ASGIApp, allowed_hosts: list[str]):
self.app = app
self.allowed_hosts = allowed_hosts
async def __call__(self, scope, receive, send):
if scope["type"] != "http":
return await self.app(scope, receive, send)
if scope["path"] in HEALTH_ENDPOINTS:
return await self.app(scope, receive, send)
headers = dict(scope.get("headers", []))
host = headers.get(b"host", b"").decode("utf-8")
if not is_allowed_host_header(self.allowed_hosts, host):
_logger.warning(f"Rejected request with invalid Host header: {host}")
async def send_403(message):
if message["type"] == "http.response.start":
message["status"] = 403
message["headers"] = [(b"content-type", b"text/plain")]
await send(message)
await send_403({"type": "http.response.start", "status": 403, "headers": []})
await send({"type": "http.response.body", "body": INVALID_HOST_MSG.encode()})
return
return await self.app(scope, receive, send)
| HostValidationMiddleware |
python | django__django | tests/gis_tests/geoapp/models.py | {
"start": 2120,
"end": 2262
} | class ____(NamedModel):
point1 = models.PointField()
point2 = models.PointField()
point3 = models.PointField(srid=3857)
| ManyPointModel |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py | {
"start": 1295,
"end": 12592
} | class ____(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer: RequirementPreparer,
finder: PackageFinder,
wheel_cache: Optional[WheelCache],
make_install_req: InstallRequirementProvider,
use_user_site: bool,
ignore_dependencies: bool,
ignore_installed: bool,
ignore_requires_python: bool,
force_reinstall: bool,
upgrade_strategy: str,
py_version_info: Optional[Tuple[int, ...]] = None,
):
super().__init__()
assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
finder=finder,
preparer=preparer,
make_install_req=make_install_req,
wheel_cache=wheel_cache,
use_user_site=use_user_site,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
py_version_info=py_version_info,
)
self.ignore_dependencies = ignore_dependencies
self.upgrade_strategy = upgrade_strategy
self._result: Optional[Result] = None
def resolve(
self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
) -> RequirementSet:
collected = self.factory.collect_root_requirements(root_reqs)
provider = PipProvider(
factory=self.factory,
constraints=collected.constraints,
ignore_dependencies=self.ignore_dependencies,
upgrade_strategy=self.upgrade_strategy,
user_requested=collected.user_requested,
)
if "PIP_RESOLVER_DEBUG" in os.environ:
reporter: BaseReporter = PipDebuggingReporter()
else:
reporter = PipReporter()
resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
provider,
reporter,
)
try:
limit_how_complex_resolution_can_be = 200000
result = self._result = resolver.resolve(
collected.requirements, max_rounds=limit_how_complex_resolution_can_be
)
except ResolutionImpossible as e:
error = self.factory.get_installation_error(
cast("ResolutionImpossible[Requirement, Candidate]", e),
collected.constraints,
)
raise error from e
req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
# process candidates with extras last to ensure their base equivalent is
# already in the req_set if appropriate.
# Python's sort is stable so using a binary key function keeps relative order
# within both subsets.
for candidate in sorted(
result.mapping.values(), key=lambda c: c.name != c.project_name
):
ireq = candidate.get_install_requirement()
if ireq is None:
if candidate.name != candidate.project_name:
# extend existing req's extras
with contextlib.suppress(KeyError):
req = req_set.get_requirement(candidate.project_name)
req_set.add_named_requirement(
install_req_extend_extras(
req, get_requirement(candidate.name).extras
)
)
continue
# Check if there is already an installation under the same name,
# and set a flag for later stages to uninstall it, if needed.
installed_dist = self.factory.get_dist_to_uninstall(candidate)
if installed_dist is None:
# There is no existing installation -- nothing to uninstall.
ireq.should_reinstall = False
elif self.factory.force_reinstall:
# The --force-reinstall flag is set -- reinstall.
ireq.should_reinstall = True
elif installed_dist.version != candidate.version:
# The installation is different in version -- reinstall.
ireq.should_reinstall = True
elif candidate.is_editable or installed_dist.editable:
# The incoming distribution is editable, or different in
# editable-ness to installation -- reinstall.
ireq.should_reinstall = True
elif candidate.source_link and candidate.source_link.is_file:
# The incoming distribution is under file://
if candidate.source_link.is_wheel:
# is a local wheel -- do nothing.
logger.info(
"%s is already installed with the same version as the "
"provided wheel. Use --force-reinstall to force an "
"installation of the wheel.",
ireq.name,
)
continue
# is a local sdist or path -- reinstall
ireq.should_reinstall = True
else:
continue
link = candidate.source_link
if link and link.is_yanked:
# The reason can contain non-ASCII characters, Unicode
# is required for Python 2.
msg = (
"The candidate selected for download or install is a "
"yanked version: {name!r} candidate (version {version} "
"at {link})\nReason for being yanked: {reason}"
).format(
name=candidate.name,
version=candidate.version,
link=link,
reason=link.yanked_reason or "<none given>",
)
logger.warning(msg)
req_set.add_named_requirement(ireq)
reqs = req_set.all_requirements
self.factory.preparer.prepare_linked_requirements_more(reqs)
for req in reqs:
req.prepared = True
req.needs_more_preparation = False
return req_set
def get_installation_order(
self, req_set: RequirementSet
) -> List[InstallRequirement]:
"""Get order for installation of requirements in RequirementSet.
The returned list contains a requirement before another that depends on
it. This helps ensure that the environment is kept consistent as they
get installed one-by-one.
The current implementation creates a topological ordering of the
dependency graph, giving more weight to packages with less
or no dependencies, while breaking any cycles in the graph at
arbitrary points. We make no guarantees about where the cycle
would be broken, other than it *would* be broken.
"""
assert self._result is not None, "must call resolve() first"
if not req_set.requirements:
# Nothing is left to install, so we do not need an order.
return []
graph = self._result.graph
weights = get_topological_weights(graph, set(req_set.requirements.keys()))
sorted_items = sorted(
req_set.requirements.items(),
key=functools.partial(_req_set_item_sorter, weights=weights),
reverse=True,
)
return [ireq for _, ireq in sorted_items]
def get_topological_weights(
graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
) -> Dict[Optional[str], int]:
"""Assign weights to each node based on how "deep" they are.
This implementation may change at any point in the future without prior
notice.
We first simplify the dependency graph by pruning any leaves and giving them
the highest weight: a package without any dependencies should be installed
first. This is done again and again in the same way, giving ever less weight
to the newly found leaves. The loop stops when no leaves are left: all
remaining packages have at least one dependency left in the graph.
Then we continue with the remaining graph, by taking the length for the
longest path to any node from root, ignoring any paths that contain a single
node twice (i.e. cycles). This is done through a depth-first search through
the graph, while keeping track of the path to the node.
Cycles in the graph result would result in node being revisited while also
being on its own path. In this case, take no action. This helps ensure we
don't get stuck in a cycle.
When assigning weight, the longer path (i.e. larger length) is preferred.
We are only interested in the weights of packages that are in the
requirement_keys.
"""
path: Set[Optional[str]] = set()
weights: Dict[Optional[str], int] = {}
def visit(node: Optional[str]) -> None:
if node in path:
# We hit a cycle, so we'll break it here.
return
# Time to visit the children!
path.add(node)
for child in graph.iter_children(node):
visit(child)
path.remove(node)
if node not in requirement_keys:
return
last_known_parent_count = weights.get(node, 0)
weights[node] = max(last_known_parent_count, len(path))
# Simplify the graph, pruning leaves that have no dependencies.
# This is needed for large graphs (say over 200 packages) because the
# `visit` function is exponentially slower then, taking minutes.
# See https://github.com/pypa/pip/issues/10557
# We will loop until we explicitly break the loop.
while True:
leaves = set()
for key in graph:
if key is None:
continue
for _child in graph.iter_children(key):
# This means we have at least one child
break
else:
# No child.
leaves.add(key)
if not leaves:
# We are done simplifying.
break
# Calculate the weight for the leaves.
weight = len(graph) - 1
for leaf in leaves:
if leaf not in requirement_keys:
continue
weights[leaf] = weight
# Remove the leaves from the graph, making it simpler.
for leaf in leaves:
graph.remove(leaf)
# Visit the remaining graph.
# `None` is guaranteed to be the root node by resolvelib.
visit(None)
# Sanity check: all requirement keys should be in the weights,
# and no other keys should be in the weights.
difference = set(weights.keys()).difference(requirement_keys)
assert not difference, difference
return weights
def _req_set_item_sorter(
item: Tuple[str, InstallRequirement],
weights: Dict[Optional[str], int],
) -> Tuple[int, str]:
"""Key function used to sort install requirements for installation.
Based on the "weight" mapping calculated in ``get_installation_order()``.
The canonical package name is returned as the second member as a tie-
breaker to ensure the result is predictable, which is useful in tests.
"""
name = canonicalize_name(item[0])
return weights[name], name
| Resolver |
python | doocs__leetcode | solution/2900-2999/2997.Minimum Number of Operations to Make Array XOR Equal to K/Solution.py | {
"start": 0,
"end": 125
} | class ____:
def minOperations(self, nums: List[int], k: int) -> int:
return reduce(xor, nums, k).bit_count()
| Solution |
python | pola-rs__polars | py-polars/tests/unit/constructors/test_constructors.py | {
"start": 1855,
"end": 1924
} | class ____(pydantic.BaseModel):
x: int
y: _TestBarPD
| _TestFooPD |
python | realpython__materials | arcade-platformer/arcade_platformer/arcade_platformer.py | {
"start": 4159,
"end": 5607
} | class ____(arcade.View):
"""Show instructions to the player"""
def __init__(self) -> None:
"""Create instructions screen"""
super().__init__()
# Find the instructions image in the image folder
instructions_image_path = (
ASSETS_PATH / "images" / "instructions_image.png"
)
# Load our title image
self.instructions_image = arcade.load_texture(instructions_image_path)
def on_draw(self) -> None:
# Start the rendering loop
arcade.start_render()
# Draw a rectangle filled with the instructions image
arcade.draw_texture_rectangle(
center_x=game.SCREEN_WIDTH / 2,
center_y=game.SCREEN_HEIGHT / 2,
width=game.SCREEN_WIDTH,
height=game.SCREEN_HEIGHT,
texture=self.instructions_image,
)
def on_key_press(self, key: int, modifiers: int) -> None:
"""Start the game when the user presses Enter
Arguments:
key -- Which key was pressed
modifiers -- What modifiers were active
"""
if key == arcade.key.RETURN:
game_view = PlatformerView()
game_view.setup()
self.window.show_view(game_view)
elif key == arcade.key.ESCAPE:
title_view = TitleView()
self.window.show_view(title_view)
# Pause view, used when the player pauses the game
| InstructionsView |
python | mahmoud__glom | glom/matching.py | {
"start": 1228,
"end": 2086
} | class ____(MatchError, TypeError):
""":exc:`MatchError` subtype raised when a
:class:`Match` fails a type check.
>>> glom({'id': 'a'}, Match({'id': int}))
Traceback (most recent call last):
...
TypeMatchError: error raised while processing.
Target-spec trace, with error detail (most recent last):
- Target: {'id': 'a'}
- Spec: Match({'id': <type 'int'>})
- Spec: {'id': <type 'int'>}
- Target: 'a'
- Spec: int
TypeMatchError: expected type int, not str
"""
def __init__(self, actual, expected):
super().__init__(
"expected type {0.__name__}, not {1.__name__}", expected, actual)
def __copy__(self):
# __init__ args = (actual, expected)
# self.args = (fmt_str, expected, actual)
return TypeMatchError(self.args[2], self.args[1])
| TypeMatchError |
python | keras-team__keras | keras/src/trainers/data_adapters/array_slicing.py | {
"start": 3444,
"end": 3488
} | class ____(Sliceable):
pass
| NumpySliceable |
python | networkx__networkx | networkx/generators/tests/test_degree_seq.py | {
"start": 39,
"end": 7284
} | class ____:
"""Unit tests for the :func:`~networkx.configuration_model`
function.
"""
def test_empty_degree_sequence(self):
"""Tests that an empty degree sequence yields the null graph."""
G = nx.configuration_model([])
assert len(G) == 0
def test_degree_zero(self):
"""Tests that a degree sequence of all zeros yields the empty
graph.
"""
G = nx.configuration_model([0, 0, 0])
assert len(G) == 3
assert G.number_of_edges() == 0
def test_degree_sequence(self):
"""Tests that the degree sequence of the generated graph matches
the input degree sequence.
"""
deg_seq = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
G = nx.configuration_model(deg_seq, seed=12345678)
assert sorted(dict(G.degree).values()) == sorted(deg_seq)
assert sorted(dict(G.degree(range(len(deg_seq)))).values()) == sorted(deg_seq)
@pytest.mark.parametrize("seed", [10, 1000])
def test_random_seed(self, seed):
"""Tests that each call with the same random seed generates the
same graph.
"""
deg_seq = [3] * 12
G1 = nx.configuration_model(deg_seq, seed=seed)
G2 = nx.configuration_model(deg_seq, seed=seed)
assert nx.is_isomorphic(G1, G2)
def test_directed_disallowed(self):
"""Tests that attempting to create a configuration model graph
using a directed graph yields an exception.
"""
with pytest.raises(nx.NetworkXNotImplemented):
nx.configuration_model([], create_using=nx.DiGraph())
def test_odd_degree_sum(self):
"""Tests that a degree sequence whose sum is odd yields an
exception.
"""
with pytest.raises(nx.NetworkXError):
nx.configuration_model([1, 2])
def test_directed_configuration_raise_unequal():
with pytest.raises(nx.NetworkXError):
zin = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1]
zout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 2]
nx.directed_configuration_model(zin, zout)
def test_directed_configuration_model():
G = nx.directed_configuration_model([], [], seed=0)
assert len(G) == 0
def test_simple_directed_configuration_model():
G = nx.directed_configuration_model([1, 1], [1, 1], seed=0)
assert len(G) == 2
def test_expected_degree_graph_empty():
# empty graph has empty degree sequence
deg_seq = []
G = nx.expected_degree_graph(deg_seq)
assert dict(G.degree()) == {}
@pytest.mark.parametrize("seed", [10, 42, 1000])
@pytest.mark.parametrize("deg_seq", [[3] * 12, [2, 0], [10, 2, 2, 2, 2]])
def test_expected_degree_graph(seed, deg_seq):
G1 = nx.expected_degree_graph(deg_seq, seed=seed)
G2 = nx.expected_degree_graph(deg_seq, seed=seed)
assert len(G1) == len(G2) == len(deg_seq)
assert nx.is_isomorphic(G1, G2)
def test_expected_degree_graph_selfloops():
deg_seq = [3] * 12
G1 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False)
G2 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False)
assert len(G1) == len(G2) == len(deg_seq)
assert nx.is_isomorphic(G1, G2)
assert nx.number_of_selfloops(G1) == nx.number_of_selfloops(G2) == 0
def test_havel_hakimi_construction():
G = nx.havel_hakimi_graph([])
assert len(G) == 0
z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z)
z = ["A", 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z)
z = [5, 4, 3, 3, 3, 2, 2, 2]
G = nx.havel_hakimi_graph(z)
G = nx.configuration_model(z)
z = [6, 5, 4, 4, 2, 1, 1, 1]
pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z)
z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2]
G = nx.havel_hakimi_graph(z)
pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z, create_using=nx.DiGraph())
def test_directed_havel_hakimi():
# Test range of valid directed degree sequences
n, r = 100, 10
p = 1.0 / r
for i in range(r):
G1 = nx.erdos_renyi_graph(n, p * (i + 1), None, True)
din1 = [d for n, d in G1.in_degree()]
dout1 = [d for n, d in G1.out_degree()]
G2 = nx.directed_havel_hakimi_graph(din1, dout1)
din2 = [d for n, d in G2.in_degree()]
dout2 = [d for n, d in G2.out_degree()]
assert sorted(din1) == sorted(din2)
assert sorted(dout1) == sorted(dout2)
# Test non-graphical sequence
dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102]
pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout)
# Test valid sequences
dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
din = [2, 2, 2, 2, 2, 2, 2, 2, 0, 2]
G2 = nx.directed_havel_hakimi_graph(din, dout)
dout2 = (d for n, d in G2.out_degree())
din2 = (d for n, d in G2.in_degree())
assert sorted(dout) == sorted(dout2)
assert sorted(din) == sorted(din2)
# Test unequal sums
din = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout)
# Test for negative values
din = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2]
pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout)
@pytest.mark.parametrize(
"deg_seq",
[
[0],
[1, 1],
[2, 2, 2, 1, 1],
[3, 1, 1, 1],
[4, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 3, 4],
],
)
def test_degree_sequence_tree(deg_seq):
G = nx.degree_sequence_tree(deg_seq)
assert sorted(dict(G.degree).values()) == sorted(deg_seq)
assert nx.is_tree(G)
@pytest.mark.parametrize("graph_type", [nx.DiGraph, nx.MultiDiGraph])
def test_degree_sequence_tree_directed(graph_type):
with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"):
nx.degree_sequence_tree([1, 1], create_using=graph_type())
@pytest.mark.parametrize(
"deg_seq",
[
[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4],
[],
[2, 0],
[-1, 3],
[1, 16, 1, 4, 0, 0, 1, 1, 0, 1, 2, 0, 1, 0, 1, 5, 1, 2, 1, 0],
],
)
def test_degree_sequence_tree_invalid_degree_sequence(deg_seq):
"""Test invalid degree sequences raise an error."""
with pytest.raises(nx.NetworkXError, match="tree must have"):
nx.degree_sequence_tree(deg_seq)
def test_random_degree_sequence_graph():
d = [1, 2, 2, 3]
G = nx.random_degree_sequence_graph(d, seed=42)
assert d == sorted(d for n, d in G.degree())
def test_random_degree_sequence_graph_raise():
z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
pytest.raises(nx.NetworkXUnfeasible, nx.random_degree_sequence_graph, z)
def test_random_degree_sequence_large():
G1 = nx.fast_gnp_random_graph(100, 0.1, seed=42)
d1 = [d for n, d in G1.degree()]
G2 = nx.random_degree_sequence_graph(d1, seed=42)
d2 = [d for n, d in G2.degree()]
assert sorted(d1) == sorted(d2)
def test_random_degree_sequence_iterator():
G1 = nx.fast_gnp_random_graph(100, 0.1, seed=42)
d1 = (d for n, d in G1.degree())
G2 = nx.random_degree_sequence_graph(d1, seed=42)
assert len(G2) > 0
| TestConfigurationModel |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 88159,
"end": 88802
} | class ____(_PrintableStructure):
_fields_ = [("version", c_uint),
("revision", c_uint),
("guestInfoState", _nvmlVgpuGuestInfoState_t),
("guestDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE),
("hostDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE),
("reserved", c_uint * 6),
("vgpuVirtualizationCaps", c_uint),
("guestVgpuVersion", c_uint),
("opaqueDataSize", c_uint),
("opaqueData", c_char * NVML_VGPU_METADATA_OPAQUE_DATA_SIZE)
]
| c_nvmlVgpuMetadata_t |
python | pypa__setuptools | setuptools/tests/config/test_pyprojecttoml.py | {
"start": 6468,
"end": 12438
} | class ____:
def test_dynamic(self, tmp_path):
# Let's create a project example that has dynamic classifiers
# coming from a txt file.
create_example(tmp_path, "src")
classifiers = cleandoc(
"""
Framework :: Flask
Programming Language :: Haskell
"""
)
(tmp_path / "classifiers.txt").write_text(classifiers, encoding="utf-8")
pyproject = tmp_path / "pyproject.toml"
config = read_configuration(pyproject, expand=False)
dynamic = config["project"]["dynamic"]
config["project"]["dynamic"] = list({*dynamic, "classifiers"})
dynamic_config = config["tool"]["setuptools"]["dynamic"]
dynamic_config["classifiers"] = {"file": "classifiers.txt"}
# When the configuration is expanded,
# each line of the file should be an different classifier.
validate(config, pyproject)
expanded = expand_configuration(config, tmp_path)
assert set(expanded["project"]["classifiers"]) == {
"Framework :: Flask",
"Programming Language :: Haskell",
}
def test_dynamic_without_config(self, tmp_path):
config = """
[project]
name = "myproj"
version = '42'
dynamic = ["classifiers"]
"""
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(config), encoding="utf-8")
with pytest.raises(OptionError, match="No configuration .* .classifiers."):
read_configuration(pyproject)
def test_dynamic_readme_from_setup_script_args(self, tmp_path):
config = """
[project]
name = "myproj"
version = '42'
dynamic = ["readme"]
"""
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(config), encoding="utf-8")
dist = Distribution(attrs={"long_description": "42"})
# No error should occur because of missing `readme`
dist = apply_configuration(dist, pyproject)
assert dist.metadata.long_description == "42"
def test_dynamic_without_file(self, tmp_path):
config = """
[project]
name = "myproj"
version = '42'
dynamic = ["classifiers"]
[tool.setuptools.dynamic]
classifiers = {file = ["classifiers.txt"]}
"""
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(config), encoding="utf-8")
with pytest.warns(UserWarning, match="File .*classifiers.txt. cannot be found"):
expanded = read_configuration(pyproject)
assert "classifiers" not in expanded["project"]
@pytest.mark.parametrize(
"example",
(
"""
[project]
name = "myproj"
version = "1.2"
[my-tool.that-disrespect.pep518]
value = 42
""",
),
)
def test_ignore_unrelated_config(tmp_path, example):
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(example), encoding="utf-8")
# Make sure no error is raised due to 3rd party configs in pyproject.toml
assert read_configuration(pyproject) is not None
@pytest.mark.parametrize(
("example", "error_msg"),
[
(
"""
[project]
name = "myproj"
version = "1.2"
requires = ['pywin32; platform_system=="Windows"' ]
""",
"configuration error: .project. must not contain ..requires.. properties",
),
],
)
def test_invalid_example(tmp_path, example, error_msg):
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(example), encoding="utf-8")
pattern = re.compile(
f"invalid pyproject.toml.*{error_msg}.*", re.MULTILINE | re.DOTALL
)
with pytest.raises(ValueError, match=pattern):
read_configuration(pyproject)
@pytest.mark.parametrize("config", ("", "[tool.something]\nvalue = 42"))
def test_empty(tmp_path, config):
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(config, encoding="utf-8")
# Make sure no error is raised
assert read_configuration(pyproject) == {}
@pytest.mark.parametrize("config", ("[project]\nname = 'myproj'\nversion='42'\n",))
def test_include_package_data_by_default(tmp_path, config):
"""Builds with ``pyproject.toml`` should consider ``include-package-data=True`` as
default.
"""
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(config, encoding="utf-8")
config = read_configuration(pyproject)
assert config["tool"]["setuptools"]["include-package-data"] is True
def test_include_package_data_in_setuppy(tmp_path):
"""Builds with ``pyproject.toml`` should consider ``include_package_data`` set in
``setup.py``.
See https://github.com/pypa/setuptools/issues/3197#issuecomment-1079023889
"""
files = {
"pyproject.toml": "[project]\nname = 'myproj'\nversion='42'\n",
"setup.py": "__import__('setuptools').setup(include_package_data=False)",
}
jaraco.path.build(files, prefix=tmp_path)
with Path(tmp_path):
dist = distutils.core.run_setup("setup.py", {}, stop_after="config")
assert dist.get_name() == "myproj"
assert dist.get_version() == "42"
assert dist.include_package_data is False
def test_warn_tools_typo(tmp_path):
"""Test that the common ``tools.setuptools`` typo in ``pyproject.toml`` issues a warning
See https://github.com/pypa/setuptools/issues/4150
"""
config = """
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "myproj"
version = '42'
[tools.setuptools]
packages = ["package"]
"""
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(cleandoc(config), encoding="utf-8")
with pytest.warns(_ToolsTypoInMetadata):
read_configuration(pyproject)
| TestClassifiers |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/mwaa.py | {
"start": 1198,
"end": 4753
} | class ____(AwsBaseWaiterTrigger):
"""
Trigger when an MWAA Dag Run is complete.
:param external_env_name: The external MWAA environment name that contains the DAG Run you want to wait for
(templated)
:param external_dag_id: The DAG ID in the external MWAA environment that contains the DAG Run you want to wait for
(templated)
:param external_dag_run_id: The DAG Run ID in the external MWAA environment that you want to wait for (templated)
:param success_states: Collection of DAG Run states that would make this task marked as successful, default is
``{airflow.utils.state.DagRunState.SUCCESS}`` (templated)
:param failure_states: Collection of DAG Run states that would make this task marked as failed and raise an
AirflowException, default is ``{airflow.utils.state.DagRunState.FAILED}`` (templated)
:param waiter_delay: The amount of time in seconds to wait between attempts. (default: 60)
:param waiter_max_attempts: The maximum number of attempts to be made. (default: 720)
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
*args,
external_env_name: str,
external_dag_id: str,
external_dag_run_id: str,
success_states: Collection[str] | None = None,
failure_states: Collection[str] | None = None,
waiter_delay: int = 60,
waiter_max_attempts: int = 720,
**kwargs,
) -> None:
self.success_states = set(success_states) if success_states else {DagRunState.SUCCESS.value}
self.failure_states = set(failure_states) if failure_states else {DagRunState.FAILED.value}
if len(self.success_states & self.failure_states):
raise ValueError("success_states and failure_states must not have any values in common")
in_progress_states = {s.value for s in DagRunState} - self.success_states - self.failure_states
super().__init__(
serialized_fields={
"external_env_name": external_env_name,
"external_dag_id": external_dag_id,
"external_dag_run_id": external_dag_run_id,
"success_states": success_states,
"failure_states": failure_states,
},
waiter_name="mwaa_dag_run_complete",
waiter_args={
"Name": external_env_name,
"Path": f"/dags/{external_dag_id}/dagRuns/{external_dag_run_id}",
"Method": "GET",
},
failure_message=f"The DAG run {external_dag_run_id} of DAG {external_dag_id} in MWAA environment {external_env_name} failed with state",
status_message="State of DAG run",
status_queries=["RestApiResponse.state"],
return_key="dag_run_id",
return_value=external_dag_run_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
waiter_config_overrides={
"acceptors": _build_waiter_acceptors(
success_states=self.success_states,
failure_states=self.failure_states,
in_progress_states=in_progress_states,
)
},
**kwargs,
)
def hook(self) -> AwsGenericHook:
return MwaaHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
verify=self.verify,
config=self.botocore_config,
)
| MwaaDagRunCompletedTrigger |
python | ray-project__ray | python/ray/tests/spark/test_multicores_per_task.py | {
"start": 437,
"end": 1704
} | class ____(RayOnSparkGPUClusterTestBase):
@classmethod
def setup_class(cls):
cls.num_total_cpus = 4
cls.num_total_gpus = 4
cls.num_cpus_per_spark_task = 2
cls.num_gpus_per_spark_task = 2
cls.max_spark_tasks = 2
gpu_discovery_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "discover_4_gpu.sh"
)
os.environ["SPARK_WORKER_CORES"] = "4"
cls.spark = (
SparkSession.builder.master("local-cluster[1, 4, 1024]")
.config("spark.task.cpus", "2")
.config("spark.task.resource.gpu.amount", "2")
.config("spark.executor.cores", "4")
.config("spark.worker.resource.gpu.amount", "4")
.config("spark.executor.resource.gpu.amount", "4")
.config("spark.task.maxFailures", "1")
.config(
"spark.worker.resource.gpu.discoveryScript", gpu_discovery_script_path
)
.config("spark.executorEnv.RAY_ON_SPARK_WORKER_CPU_CORES", "4")
.config("spark.executorEnv.RAY_ON_SPARK_WORKER_GPU_NUM", "4")
.getOrCreate()
)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestMultiCoresPerTaskCluster |
python | doocs__leetcode | solution/2400-2499/2419.Longest Subarray With Maximum Bitwise AND/Solution.py | {
"start": 0,
"end": 285
} | class ____:
def longestSubarray(self, nums: List[int]) -> int:
mx = max(nums)
ans = cnt = 0
for x in nums:
if x == mx:
cnt += 1
ans = max(ans, cnt)
else:
cnt = 0
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 24268,
"end": 26622
} | class ____(ModelOutput):
r"""
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`):
Stacked intermediate logits (logits of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked initial reference points (initial reference points of each layer of the decoder).
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_logits: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
intermediate_predicted_corners: Optional[torch.FloatTensor] = None
initial_reference_points: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
| RTDetrV2DecoderOutput |
python | run-llama__llama_index | llama-index-core/tests/postprocessor/test_structured_llm_rerank.py | {
"start": 1606,
"end": 4428
} | class ____(MockLLM):
@property
def metadata(self) -> LLMMetadata:
return super().metadata.model_copy(update={"is_function_calling_model": True})
@patch.object(
MockFunctionCallingLLM,
"structured_predict",
mock_llmpredictor_structured_predict,
)
def test_llm_rerank() -> None:
"""Test LLM rerank."""
nodes = [
TextNode(text="Test"),
TextNode(text="Test2"),
TextNode(text="Test3"),
TextNode(text="Test4"),
TextNode(text="Test5"),
TextNode(text="Test6"),
TextNode(text="Test7"),
TextNode(text="Test8"),
]
nodes_with_score = [NodeWithScore(node=n) for n in nodes]
# choice batch size 4 (so two batches)
# take top-3 across all data
llm = MockFunctionCallingLLM()
llm.metadata.is_function_calling_model = True
llm_rerank = StructuredLLMRerank(
llm=llm,
format_node_batch_fn=mock_format_node_batch_fn,
choice_batch_size=4,
top_n=3,
)
query_str = "What is?"
result_nodes = llm_rerank.postprocess_nodes(
nodes_with_score, QueryBundle(query_str)
)
assert len(result_nodes) == 3
assert result_nodes[0].node.get_content() == "Test7"
assert result_nodes[1].node.get_content() == "Test5"
assert result_nodes[2].node.get_content() == "Test3"
def mock_errored_structured_predict(
self: Any, prompt: BasePromptTemplate, **prompt_args: Any
) -> str:
return "fake error"
@patch.object(
MockFunctionCallingLLM,
"structured_predict",
mock_errored_structured_predict,
)
@pytest.mark.parametrize("raise_on_failure", [True, False])
def test_llm_rerank_errored_structured_predict(raise_on_failure: bool) -> None:
"""Test LLM rerank with errored structured predict."""
nodes = [
TextNode(text="Test"),
TextNode(text="Test2"),
TextNode(text="Test3"),
TextNode(text="Test4"),
]
nodes_with_score = [NodeWithScore(node=n) for n in nodes]
llm = MockFunctionCallingLLM()
llm.metadata.is_function_calling_model = True
top_n = 3
llm_rerank = StructuredLLMRerank(
llm=llm,
format_node_batch_fn=mock_format_node_batch_fn,
choice_batch_size=4,
top_n=top_n,
raise_on_structured_prediction_failure=raise_on_failure, # Set to False to test logging behavior
)
query_str = "What is?"
if raise_on_failure:
with pytest.raises(ValueError, match="Structured prediction failed for nodes"):
llm_rerank.postprocess_nodes(nodes_with_score, QueryBundle(query_str))
else:
result_nodes = llm_rerank.postprocess_nodes(
nodes_with_score, QueryBundle(query_str)
)
assert len(result_nodes) == top_n
assert all(n.score == 0 for n in result_nodes)
| MockFunctionCallingLLM |
python | encode__django-rest-framework | tests/test_validation.py | {
"start": 406,
"end": 616
} | class ____(serializers.ModelSerializer):
class Meta:
model = ValidationModel
fields = ('blank_validated_field',)
read_only_fields = ('blank_validated_field',)
| ValidationModelSerializer |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_sorting.py | {
"start": 116,
"end": 5052
} | class ____:
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(
c.argsort(ascending=True), expected, check_dtype=False
)
expected = expected[::-1]
tm.assert_numpy_array_equal(
c.argsort(ascending=False), expected, check_dtype=False
)
def test_numpy_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False)
tm.assert_numpy_array_equal(
np.argsort(c, kind="mergesort"), expected, check_dtype=False
)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, axis=0)
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, order="C")
def test_sort_values(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
cat = Categorical(
["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True
)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# sort (inplace order)
cat1 = cat.copy()
orig_codes = cat1._codes
cat1.sort_values(inplace=True)
assert cat1._codes is orig_codes
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(cat1.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = Index([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position="first")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position="first")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position="last")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position="last")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position="last")
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position="first")
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
| TestCategoricalSort |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.